From 13864dc8f59d1907881dcff28c33fc2594347c4c Mon Sep 17 00:00:00 2001 From: GuEe-GUI <2991707448@qq.com> Date: Sun, 29 Mar 2026 22:16:52 +0800 Subject: [PATCH] [dm][rpmsg] support Remote Processor Messaging (RPMSG) Signed-off-by: GuEe-GUI <2991707448@qq.com> --- components/drivers/Kconfig | 1 + components/drivers/include/drivers/rpmsg.h | 142 +++++ components/drivers/include/rtdevice.h | 4 + components/drivers/rpmsg/Kconfig | 26 + components/drivers/rpmsg/SConscript | 18 + .../drivers/rpmsg/rpmsg-rt-thread-virtio.c | 565 ++++++++++++++++++ components/drivers/rpmsg/rpmsg.c | 327 ++++++++++ components/drivers/rpmsg/rpmsg_char.c | 398 ++++++++++++ components/drivers/rpmsg/rpmsg_ns.c | 122 ++++ 9 files changed, 1603 insertions(+) create mode 100755 components/drivers/include/drivers/rpmsg.h create mode 100755 components/drivers/rpmsg/Kconfig create mode 100755 components/drivers/rpmsg/SConscript create mode 100755 components/drivers/rpmsg/rpmsg-rt-thread-virtio.c create mode 100755 components/drivers/rpmsg/rpmsg.c create mode 100755 components/drivers/rpmsg/rpmsg_char.c create mode 100755 components/drivers/rpmsg/rpmsg_ns.c diff --git a/components/drivers/Kconfig b/components/drivers/Kconfig index f04d3c7f489..386ba72734d 100755 --- a/components/drivers/Kconfig +++ b/components/drivers/Kconfig @@ -25,6 +25,7 @@ rsource "led/Kconfig" rsource "input/Kconfig" rsource "mailbox/Kconfig" rsource "hwspinlock/Kconfig" +rsource "rpmsg/Kconfig" rsource "phye/Kconfig" rsource "ata/Kconfig" rsource "nvme/Kconfig" diff --git a/components/drivers/include/drivers/rpmsg.h b/components/drivers/include/drivers/rpmsg.h new file mode 100755 index 00000000000..e44dfc7537c --- /dev/null +++ b/components/drivers/include/drivers/rpmsg.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#ifndef __RPMSG_H__ +#define __RPMSG_H__ + 
+#include +#include + +#include +#include +#include + +#define RT_DEVICE_CTRL_RPMSG_CREATE_EPT (RT_DEVICE_CTRL_BASE(Char) + 'R' + 1) +#define RT_DEVICE_CTRL_RPMSG_DESTROY_EPT (RT_DEVICE_CTRL_BASE(Char) + 'R' + 2) +#define RT_DEVICE_CTRL_RPMSG_DATA_OVERWRITE (RT_DEVICE_CTRL_BASE(Char) + 'R' + 3) + +struct rt_rpmsg_device_id +{ +#define RT_RPMSG_NAME_SIZE 32 + char name[RT_RPMSG_NAME_SIZE]; + + const void *data; +}; + +struct rt_rpmsg_ops; +struct rt_rpmsg_endpoint; +struct rt_rpmsg_endpoint_info; + +struct rt_rpmsg_device +{ + struct rt_device parent; + + struct rt_rpmsg_device_id id; + rt_list_t ept_nodes; + struct rt_spinlock lock; + + const struct rt_rpmsg_ops *ops; + void *priv; +}; + +struct rt_rpmsg_driver +{ + struct rt_driver parent; + + const struct rt_rpmsg_device_id *ids; + + rt_err_t (*probe)(struct rt_rpmsg_device *rdev); + rt_err_t (*remove)(struct rt_rpmsg_device *rdev); + rt_err_t (*rx_callback)(struct rt_rpmsg_device *rdev, + rt_uint32_t src, void *data, rt_size_t len); +}; + +typedef rt_err_t (*rt_rpmsg_rx_callback)(struct rt_rpmsg_device *rdev, + rt_uint32_t src, void *data, rt_size_t len); + +struct rt_rpmsg_ops +{ + rt_err_t (*create_endpoint)(struct rt_rpmsg_device *, struct rt_rpmsg_endpoint *, + struct rt_rpmsg_endpoint_info *info); + rt_err_t (*destroy_endpoint)(struct rt_rpmsg_device *, struct rt_rpmsg_endpoint *); + rt_err_t (*send)(struct rt_rpmsg_device *, rt_uint32_t src, rt_uint32_t dst, + const void *data, rt_size_t len, rt_int32_t timeout); +}; + +struct rt_rpmsg_endpoint_info +{ + char name[RT_RPMSG_NAME_SIZE]; + +#define RT_RPMSG_ADDR_ANY 0xffffffff + rt_uint32_t src; + rt_uint32_t dst; +}; + +struct rt_rpmsg_endpoint +{ + rt_list_t list; + struct rt_rpmsg_device *rdev; + + struct rt_rpmsg_endpoint_info info; + rt_rpmsg_rx_callback rx_callback; + + struct rt_spinlock lock; + void *sysdata; + void *priv; +}; + +enum rt_rpmsg_ns_flags +{ + RT_RPMSG_NS_CREATE = 0, + RT_RPMSG_NS_DESTROY = 1, +}; + +rt_packed(struct rt_rpmsg_ns_msg +{ + 
char name[RT_RPMSG_NAME_SIZE]; + +#define RT_RPMSG_NS_ADDR 0x35 /* 0x35 -> 53 */ + rt_uint32_t addr; + rt_uint32_t flags; +}); + +enum +{ + RT_RPMSG_MODE_MASTER, + RT_RPMSG_MODE_SLAVE, + + RT_RPMSG_MODE_MAX, +}; + +rt_uint32_t rt_rpmsg_mode(void); + +struct rt_rpmsg_endpoint *rt_rpmsg_create_endpoint(struct rt_rpmsg_device *, + struct rt_rpmsg_endpoint_info *info, rt_rpmsg_rx_callback rx_cb); +rt_err_t rt_rpmsg_destroy_endpoint(struct rt_rpmsg_device *, + struct rt_rpmsg_endpoint *); +struct rt_rpmsg_endpoint *rt_rpmsg_find_endpoint(struct rt_rpmsg_device *, + struct rt_rpmsg_endpoint_info *info); + +rt_err_t rt_rpmsg_send(struct rt_rpmsg_endpoint *, + const void *data, rt_size_t len); +rt_err_t rt_rpmsg_sendto(struct rt_rpmsg_endpoint *, rt_uint32_t dst, + const void *data, rt_size_t len); + +rt_err_t rt_rpmsg_send_wait(struct rt_rpmsg_endpoint *, + const void *data, rt_size_t len, rt_int32_t timeout); +rt_err_t rt_rpmsg_sendto_wait(struct rt_rpmsg_endpoint *, rt_uint32_t dst, + const void *data, rt_size_t len, rt_int32_t timeout); + +rt_err_t rt_rpmsg_driver_register(struct rt_rpmsg_driver *rdrv); +rt_err_t rt_rpmsg_device_register(struct rt_rpmsg_device *rdev); + +#define RT_RPMSG_DRIVER_EXPORT(driver) RT_DRIVER_EXPORT(driver, rpmsg, BUILIN) + +#endif /* __RPMSG_H__ */ diff --git a/components/drivers/include/rtdevice.h b/components/drivers/include/rtdevice.h index c3a1277449c..e059828f03c 100644 --- a/components/drivers/include/rtdevice.h +++ b/components/drivers/include/rtdevice.h @@ -78,6 +78,10 @@ extern "C" { #include "drivers/hwspinlock.h" #endif /* RT_USING_HWSPINLOCK */ +#ifdef RT_USING_RPMSG +#include "drivers/rpmsg.h" +#endif /* RT_USING_RPMSG */ + #ifdef RT_USING_BLK #include "drivers/blk.h" #endif /* RT_USING_BLK */ diff --git a/components/drivers/rpmsg/Kconfig b/components/drivers/rpmsg/Kconfig new file mode 100755 index 00000000000..1fad432c874 --- /dev/null +++ b/components/drivers/rpmsg/Kconfig @@ -0,0 +1,26 @@ +menuconfig RT_USING_RPMSG + bool 
"Using Remote Processor Messaging (RPMSG)" + select RT_USING_DEVICE_IPC + select RT_USING_SYSTEM_WORKQUEUE + default n + +config RT_RPMSG_CHAR_MSG_MAX + int "Char device message receive max" + depends on RT_USING_RPMSG + default 64 + +config RT_RPMSG_CHAR_MSG_SIZE_MAX + int "Char device message size max" + depends on RT_USING_RPMSG + default 256 + +config RT_RPMSG_RT_THREAD_VIRTIO + bool "RT-Thread common VirtIO RPMSG" + depends on RT_USING_RPMSG + depends on RT_USING_MBOX + select RT_USING_SLAB + default y + +if RT_USING_RPMSG + osource "$(SOC_DM_RPMSG_DIR)/Kconfig" +endif diff --git a/components/drivers/rpmsg/SConscript b/components/drivers/rpmsg/SConscript new file mode 100755 index 00000000000..370f2d8892c --- /dev/null +++ b/components/drivers/rpmsg/SConscript @@ -0,0 +1,18 @@ +from building import * + +group = [] + +if not GetDepend(['RT_USING_RPMSG']): + Return('group') + +cwd = GetCurrentDir() +CPPPATH = [cwd + '/../include'] + +src = ['rpmsg.c', 'rpmsg_char.c', 'rpmsg_ns.c'] + +if GetDepend(['RT_RPMSG_RT_THREAD_VIRTIO']): + src += ['rpmsg-rt-thread-virtio.c'] + +group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH) + +Return('group') diff --git a/components/drivers/rpmsg/rpmsg-rt-thread-virtio.c b/components/drivers/rpmsg/rpmsg-rt-thread-virtio.c new file mode 100755 index 00000000000..cae8234f66b --- /dev/null +++ b/components/drivers/rpmsg/rpmsg-rt-thread-virtio.c @@ -0,0 +1,565 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include + +#include + +#define DBG_TAG "rpmsg.rt-thread.virtio" +#define DBG_LVL DBG_INFO +#include + +#include "../virtio/virtio_config/virtio-rpmsg.h" +#include "../virtio/virtio_internal.h" + +/* + * RPMsg virtio AMP device driver + * + * The AMP OS device(s) may be instantiated in one of three equivalent way: + * + * Device Tree node, eg.: + * + * 
rpmsg_virtio_master { + * compatible = "rt-thread,virtio-rpmsg"; + * shmem = <&soc_sram>; + * mboxes = <&soc_mailbox 0>; + * queue-max = <64>; + * master; + * } + * + * rpmsg_virtio_slave { + * compatible = "rt-thread,virtio-rpmsg"; + * shmem = <&soc_sram>; + * mboxes = <&soc_mailbox 1>; + * queue-max = <64>; + * slave; + * } + * + * The property "queue-max" must be equal. + */ + +#define RPMSG_VIRTIO_REG_MASTER_QUEUE_DESC 0x00 /* Virtual queue's master TX descriptor Area offset from share memory */ +#define RPMSG_VIRTIO_REG_MASTER_QUEUE_AVAIL 0x04 /* Virtual queue's master TX available Ring (Driver Area) offset from share memory */ +#define RPMSG_VIRTIO_REG_MASTER_QUEUE_USED 0x08 /* Virtual queue's master TX used Ring (Device Area) offset from share memory */ +#define RPMSG_VIRTIO_REG_SLAVE_QUEUE_DESC 0x0c /* ... slave TX ... */ +#define RPMSG_VIRTIO_REG_SLAVE_QUEUE_AVAIL 0x10 /* ... slave TX ... */ +#define RPMSG_VIRTIO_REG_SLAVE_QUEUE_USED 0x14 /* ... slave TX ... */ + +#define RPMSG_VIRTIO_EVENT_LINK RT_BIT(0) +#define RPMSG_VIRTIO_EVENT_QUEUE RT_BIT(1) + +typedef rt_uint32_t rpmsg_dma_tag; + +struct rpmsg_virtio +{ + struct rt_virtio_device parent; + struct rt_mbox_client mbox_client; + + rt_bool_t is_master; + rt_uint8_t status; + rt_uint32_t queue_max; + + void *shmem; + void *regs; + struct rt_mbox_chan *chan; + + void *pool; + rt_slab_t slab; + + rt_uint16_t next_idx; + struct virtq rx_virtq, peer_tx_virtq; + + struct rt_spinlock spinlock; +}; + +#define raw_to_rpmsg_virtio(raw) rt_container_of(raw, struct rpmsg_virtio, parent) + +#define rpmsg_virtio_readl(rv, name) \ + rt_le32_to_cpu(HWREG32((rv)->regs + RPMSG_VIRTIO_REG_##name)) +#define rpmsg_virtio_writel(rv, name, value) \ + HWREG32((rv)->regs + RPMSG_VIRTIO_REG_##name) = rt_cpu_to_le32(value) + +static void rpmsg_virtio_rx_callback(struct rt_mbox_client *client, void *data) +{ + rt_ubase_t level; + rt_uint16_t idx_inc = 0; + rt_uint32_t *event = data; + struct rt_virtqueue *vq; + struct 
rt_virtio_device *vdev; + struct virtq *rx_virtq, *tx_virtq; + struct rpmsg_virtio *rv = rt_container_of(client, struct rpmsg_virtio, mbox_client); + + if (*event & RPMSG_VIRTIO_EVENT_LINK) + { + rt_ubase_t desc, avail, used; + + if (rv->is_master) + { + desc = rpmsg_virtio_readl(rv, SLAVE_QUEUE_DESC); + avail = rpmsg_virtio_readl(rv, SLAVE_QUEUE_AVAIL); + used = rpmsg_virtio_readl(rv, SLAVE_QUEUE_USED); + } + else + { + desc = rpmsg_virtio_readl(rv, MASTER_QUEUE_DESC); + avail = rpmsg_virtio_readl(rv, MASTER_QUEUE_AVAIL); + used = rpmsg_virtio_readl(rv, MASTER_QUEUE_USED); + } + + rv->peer_tx_virtq.desc = (void *)rv->shmem + desc; + rv->peer_tx_virtq.avail = (void *)rv->shmem + avail; + rv->peer_tx_virtq.used = (void *)rv->shmem + used; + rv->next_idx = 0; + } + + rx_virtq = &rv->rx_virtq; + tx_virtq = &rv->peer_tx_virtq; + vdev = &rv->parent; + + /* Process local RX queue */ + while (tx_virtq->avail && rv->next_idx != tx_virtq->avail->idx) + { + rt_uint16_t tx_idx, rx_idx, tx_used_idx, rx_used_idx; + struct virtq_desc *tx_desc, *rx_desc; + + tx_idx = virtio16_to_cpu(vdev, tx_virtq->avail->ring[rv->next_idx % tx_virtq->num]); + rx_idx = virtio16_to_cpu(vdev, rx_virtq->avail->ring[rv->next_idx % rx_virtq->num]); + tx_used_idx = virtio16_to_cpu(vdev, tx_virtq->used->idx) % tx_virtq->num; + rx_used_idx = virtio16_to_cpu(vdev, rx_virtq->used->idx) % rx_virtq->num; + rt_hw_rmb(); + + tx_desc = &tx_virtq->desc[tx_idx]; + rx_desc = &rx_virtq->desc[rx_idx]; + + *(rpmsg_dma_tag *)(rv->shmem + + virtio64_to_cpu(vdev, rx_desc->addr) + + virtio32_to_cpu(vdev, rx_desc->len)) = virtio64_to_cpu(vdev, tx_desc->addr); + + tx_virtq->used->ring[tx_used_idx].id = tx_idx; + tx_virtq->used->ring[tx_used_idx].len = cpu_to_virtio32(vdev, 0); + rx_virtq->used->ring[rx_used_idx].id = rx_idx; + rx_virtq->used->ring[rx_used_idx].len = rx_virtq->desc[rx_idx].len; + + ++rx_virtq->used->idx; + ++rv->next_idx; + ++idx_inc; + rt_hw_wmb(); + } + + if (*event & RPMSG_VIRTIO_EVENT_QUEUE) + { + level 
= rt_spin_lock_irqsave(&rv->spinlock); + + rt_list_for_each_entry(vq, &rv->parent.vq_node, list) + { + rt_virtqueue_isr(vq->index, vq); + } + + rt_spin_unlock_irqrestore(&rv->spinlock, level); + } + + /* Process peer TX queue */ + if (idx_inc) + { + tx_virtq->used->idx += cpu_to_virtio16(vdev, idx_inc); + rt_hw_wmb(); + } +} + +static rt_bool_t rpmsg_virtio_notify(struct rt_virtqueue *vq) +{ + rt_uint32_t event; + struct rpmsg_virtio *rv = raw_to_rpmsg_virtio(vq->vdev); + + event = RPMSG_VIRTIO_EVENT_QUEUE; + rt_mbox_send(rv->chan, &event, RT_WAITING_FOREVER); + + return RT_TRUE; +} + +static rt_err_t rpmsg_virtio_get_status(struct rt_virtio_device *vdev, + rt_uint8_t *out_status) +{ + struct rpmsg_virtio *rv = raw_to_rpmsg_virtio(vdev); + + *out_status = rv->status; + + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_set_status(struct rt_virtio_device *vdev, + rt_uint8_t status) +{ + struct rpmsg_virtio *rv = raw_to_rpmsg_virtio(vdev); + + rv->status = status; + + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_get_features(struct rt_virtio_device *vdev, + rt_uint64_t *out_features) +{ + struct rpmsg_virtio *rv = raw_to_rpmsg_virtio(vdev); + + *out_features = RT_BIT_ULL(VIRTIO_F_VERSION_1) | RT_BIT_ULL(VIRTIO_F_ANY_LAYOUT); + + if (rv->is_master) + { + *out_features |= RT_BIT_ULL(VIRTIO_RPMSG_F_NS); + } + + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_set_features(struct rt_virtio_device *vdev) +{ + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_get_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, void *dst, int length) +{ + return -RT_ENOSYS; +} + +static rt_err_t rpmsg_virtio_set_config(struct rt_virtio_device *vdev, + rt_uint32_t offset, const void *src, int length) +{ + return -RT_ENOSYS; +} + +static rt_err_t rpmsg_virtio_install_vqs(struct rt_virtio_device *vdev, int vqs_nr, + struct rt_virtqueue *vqs[], const char *names[], rt_virtqueue_callback cbs[]) +{ + rt_uint32_t event; + struct rt_virtqueue *vq; + struct rpmsg_virtio *rv = 
raw_to_rpmsg_virtio(vdev); + + if (vqs_nr != 2) + { + return -RT_EINVAL; + } + + /* Master TX or slave RX */ + vq = rt_virtqueue_create(vdev, names[0], 0, rv->queue_max, ARCH_PAGE_SIZE, + rpmsg_virtio_notify, cbs[0], RT_NULL); + + if (!vq) + { + goto _fail; + } + vqs[0] = vq; + + /* Master RX or slave TX */ + vq = rt_virtqueue_create(vdev, names[1], 1, rv->queue_max, ARCH_PAGE_SIZE, + rpmsg_virtio_notify, cbs[1], RT_NULL); + + if (!vq) + { + goto _fail; + } + vqs[1] = vq; + + if (rv->is_master) + { + rv->rx_virtq.desc = (void *)rt_virtqueue_get_desc_addr(vqs[1]); + rv->rx_virtq.avail = (void *)rt_virtqueue_get_avail_addr(vqs[1]); + rv->rx_virtq.used = (void *)rt_virtqueue_get_used_addr(vqs[1]); + + rpmsg_virtio_writel(rv, MASTER_QUEUE_DESC, + (rt_ubase_t)rt_virtqueue_get_desc_addr(vqs[0]) - (rt_ubase_t)rv->shmem); + rpmsg_virtio_writel(rv, MASTER_QUEUE_AVAIL, + (rt_ubase_t)rt_virtqueue_get_avail_addr(vqs[0]) - (rt_ubase_t)rv->shmem); + rpmsg_virtio_writel(rv, MASTER_QUEUE_USED, + (rt_ubase_t)rt_virtqueue_get_used_addr(vqs[0]) - (rt_ubase_t)rv->shmem); + } + else + { + rv->rx_virtq.desc = (void *)rt_virtqueue_get_desc_addr(vqs[0]); + rv->rx_virtq.avail = (void *)rt_virtqueue_get_avail_addr(vqs[0]); + rv->rx_virtq.used = (void *)rt_virtqueue_get_used_addr(vqs[0]); + + rpmsg_virtio_writel(rv, SLAVE_QUEUE_DESC, + (rt_ubase_t)rt_virtqueue_get_desc_addr(vqs[1]) - (rt_ubase_t)rv->shmem); + rpmsg_virtio_writel(rv, SLAVE_QUEUE_AVAIL, + (rt_ubase_t)rt_virtqueue_get_avail_addr(vqs[1]) - (rt_ubase_t)rv->shmem); + rpmsg_virtio_writel(rv, SLAVE_QUEUE_USED, + (rt_ubase_t)rt_virtqueue_get_used_addr(vqs[1]) - (rt_ubase_t)rv->shmem); + + rv->peer_tx_virtq.desc = (void *)rv->shmem + rpmsg_virtio_readl(rv, MASTER_QUEUE_DESC); + rv->peer_tx_virtq.avail = (void *)rv->shmem + rpmsg_virtio_readl(rv, MASTER_QUEUE_AVAIL); + rv->peer_tx_virtq.used = (void *)rv->shmem + rpmsg_virtio_readl(rv, MASTER_QUEUE_USED); + } + rv->rx_virtq.num = rv->queue_max; + rv->peer_tx_virtq.num = rv->queue_max; 
+ + /* Ask peer to link */ + event = RPMSG_VIRTIO_EVENT_LINK; + rt_mbox_send(rv->chan, &event, RT_WAITING_FOREVER); + + return RT_EOK; + +_fail: + rt_virtio_virtqueue_release(vdev); + + return -RT_ERROR; +} + +static rt_err_t rpmsg_virtio_release_vqs(struct rt_virtio_device *vdev) +{ + struct rt_virtqueue *vq, *vq_next; + struct rpmsg_virtio *rv = raw_to_rpmsg_virtio(vdev); + + rt_list_for_each_entry_safe(vq, vq_next, &rv->parent.vq_node, list) + { + rt_virtqueue_delete(&rv->parent, vq); + } + + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_control_vqs(struct rt_virtio_device *vdev, + rt_uint32_t cfg, void *data) +{ + return -RT_ENOSYS; +} + +static rt_err_t rpmsg_virtio_generation(struct rt_virtio_device *vdev, + rt_uint32_t *out_counter) +{ + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_reset(struct rt_virtio_device *vdev) +{ + struct rpmsg_virtio *rv = raw_to_rpmsg_virtio(vdev); + + rv->status = 0; + + return RT_EOK; +} + +static const struct rt_virtio_transport rpmsg_virtio_trans = +{ + .get_status = rpmsg_virtio_get_status, + .set_status = rpmsg_virtio_set_status, + .get_features = rpmsg_virtio_get_features, + .set_features = rpmsg_virtio_set_features, + .get_config = rpmsg_virtio_get_config, + .set_config = rpmsg_virtio_set_config, + .install_vqs = rpmsg_virtio_install_vqs, + .release_vqs = rpmsg_virtio_release_vqs, + .control_vqs = rpmsg_virtio_control_vqs, + .generation = rpmsg_virtio_generation, + .reset = rpmsg_virtio_reset, +}; + +static void *rpmsg_virtio_dma_alloc(struct rt_device *dev, rt_size_t size, + rt_ubase_t *dma_handle, rt_ubase_t flags) +{ + void *dma_buffer; + struct rt_virtio_device *vdev = rt_container_of(dev, struct rt_virtio_device, parent); + struct rpmsg_virtio *rv = rt_container_of(vdev, struct rpmsg_virtio, parent); + + /* Ignore flags */ + if (!(dma_buffer = rt_slab_alloc(rv->slab, size))) + { + return dma_buffer; + } + + if (dma_handle) + { + *dma_handle = (rt_ubase_t)(dma_buffer - rv->shmem); + } + + return dma_buffer; +} 
+ +static void rpmsg_virtio_dma_free(struct rt_device *dev, rt_size_t size, + void *cpu_addr, rt_ubase_t dma_handle, rt_ubase_t flags) +{ + struct rt_virtio_device *vdev = rt_container_of(dev, struct rt_virtio_device, parent); + struct rpmsg_virtio *rv = rt_container_of(vdev, struct rpmsg_virtio, parent); + + rt_slab_free(rv->slab, cpu_addr); +} + +static rt_err_t rpmsg_virtio_dma_sync_out_data(struct rt_device *dev, + void *data, rt_size_t size, rt_ubase_t *dma_handle, rt_ubase_t flags) +{ + void *dma_buffer; + + dma_buffer = rpmsg_virtio_dma_alloc(dev, size + sizeof(rpmsg_dma_tag), dma_handle, flags); + + if (!dma_buffer) + { + return -RT_ENOMEM; + } + + rt_memcpy(dma_buffer, data, size); + + return RT_EOK; +} + +static rt_err_t rpmsg_virtio_dma_sync_in_data(struct rt_device *dev, + void *out_data, rt_size_t size, rt_ubase_t dma_handle, rt_ubase_t flags) +{ + void *dma_buffer; + rpmsg_dma_tag dma_tag; + struct rt_virtio_device *vdev = rt_container_of(dev, struct rt_virtio_device, parent); + struct rpmsg_virtio *rv = rt_container_of(vdev, struct rpmsg_virtio, parent); + + dma_buffer = rv->shmem + dma_handle; + dma_tag = *(rpmsg_dma_tag *)(dma_buffer + size); + + rt_memcpy(out_data, rv->shmem + dma_tag, size); + + size += sizeof(rpmsg_dma_tag); + rpmsg_virtio_dma_free(dev, size, dma_buffer, dma_handle, flags); + + return RT_EOK; +} + +static const struct rt_dma_map_ops rpmsg_virtio_dma_ops = +{ + .alloc = rpmsg_virtio_dma_alloc, + .free = rpmsg_virtio_dma_free, + .sync_out_data = rpmsg_virtio_dma_sync_out_data, + .sync_in_data = rpmsg_virtio_dma_sync_in_data, +}; + +static rt_err_t rpmsg_virtio_probe(struct rt_platform_device *pdev) +{ + rt_err_t err; + rt_uint64_t addr, size; + struct rt_ofw_node *shmem_np; + struct rt_virtio_device *vdev; + struct rt_device *dev = &pdev->parent; + struct rpmsg_virtio *rv = rt_calloc(1, sizeof(*rv)); + + if (!rv) + { + return -RT_ENOMEM; + } + + rv->mbox_client.dev = dev; + rv->mbox_client.rx_callback = rpmsg_virtio_rx_callback; + 
+ rv->chan = rt_mbox_request_by_index(&rv->mbox_client, 0); + + if (rt_is_err_or_null(rv->chan)) + { + err = -RT_EINVAL; + LOG_E("Request mailbox fail"); + + goto _fail; + } + + vdev = &rv->parent; + + if ((err = rt_dm_dev_prop_read_u32(dev, "queue-max", &rv->queue_max))) + { + goto _fail; + } + + shmem_np = rt_ofw_parse_phandle(dev->ofw_node, "shmem", 0); + rt_ofw_get_address(shmem_np, 0, &addr, &size); + rt_ofw_node_put(shmem_np); + + rv->shmem = rt_ioremap_cached((void *)addr, size); + + if (!rv->shmem) + { + err = -RT_EIO; + goto _fail; + } + + rv->is_master = rt_dm_dev_prop_read_bool(dev, "master"); + rv->regs = rv->shmem; + rv->pool = rv->shmem + ARCH_PAGE_SIZE; + + size = (size - ARCH_PAGE_SIZE) >> 1; + + if (!rv->is_master) + { + rv->pool += size; + } + + if (!(rv->slab = rt_slab_init(rt_dm_dev_get_name(dev), rv->pool, size))) + { + LOG_E("Create AMP dma pool fail"); + + err = -RT_ENOMEM; + goto _fail; + } + rt_dma_device_set_ops(&vdev->parent, &rpmsg_virtio_dma_ops); + vdev->dma_dispatch = RT_TRUE; + + rt_spin_lock_init(&rv->spinlock); + + vdev->id.device = VIRTIO_DEVICE_ID_RPMSG; + vdev->id.vendor = 0x534f5452; /* RTOS */ + vdev->trans = &rpmsg_virtio_trans; + vdev->parent.ofw_node = dev->ofw_node; + + if ((err = rt_virtio_device_register(vdev))) + { + goto _fail; + } + + dev->user_data = rv; + + return RT_EOK; + +_fail: + if (!rt_is_err_or_null(rv->chan)) + { + rt_mbox_release(rv->chan); + } + if (rv->shmem) + { + rt_iounmap(rv->shmem); + } + rt_free(rv); + + return err; +} + +static rt_err_t rpmsg_virtio_remove(struct rt_platform_device *pdev) +{ + struct rpmsg_virtio *rv = pdev->parent.user_data; + + rt_bus_remove_device(&rv->parent.parent); + + rt_slab_detach(rv->slab); + rt_mbox_release(rv->chan); + rt_iounmap(rv->shmem); + rt_free(rv); + + return RT_EOK; +} + +static const struct rt_ofw_node_id rpmsg_virtio_ofw_ids[] = +{ + { .compatible = "rt-thread,virtio-rpmsg" }, + { /* sentinel */ } +}; + +static struct rt_platform_driver rpmsg_virtio_driver = 
+{ + .name = "rt-thread-virtio-rpmsg", + .ids = rpmsg_virtio_ofw_ids, + + .probe = rpmsg_virtio_probe, + .remove = rpmsg_virtio_remove, +}; +RT_PLATFORM_DRIVER_EXPORT(rpmsg_virtio_driver); diff --git a/components/drivers/rpmsg/rpmsg.c b/components/drivers/rpmsg/rpmsg.c new file mode 100755 index 00000000000..0b952e8e4f1 --- /dev/null +++ b/components/drivers/rpmsg/rpmsg.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include + +#include +#include +#include +#include + +#define DBG_TAG "rtdm.rpmsg" +#define DBG_LVL DBG_INFO +#include + +static rt_uint32_t rpmsg_mode = RT_RPMSG_MODE_SLAVE; + +static int rpmsg_mode_setup(void) +{ + const char *mode = RT_NULL; + +#ifdef RT_USING_OFW + mode = rt_ofw_bootargs_select("rpmsg.mode=", 0); +#endif + + if (!mode) + { + goto _end; + } + + if (!rt_strcmp(mode, "master")) + { + rpmsg_mode = RT_RPMSG_MODE_MASTER; + } + else if (!rt_strcmp(mode, "slave")) + { + rpmsg_mode = RT_RPMSG_MODE_SLAVE; + } + else + { + LOG_W("Unknown mode of RPMsg: %s", mode); + + return (int)-RT_EINVAL; + } + +_end: + LOG_D("RPMsg mode: %s", rpmsg_mode == RT_RPMSG_MODE_MASTER ? "master" : "slave"); + + return 0; +} +INIT_CORE_EXPORT(rpmsg_mode_setup); + +rt_uint32_t rt_rpmsg_mode(void) +{ + return rpmsg_mode; +} + +struct rt_rpmsg_endpoint *rt_rpmsg_create_endpoint(struct rt_rpmsg_device *rdev, + struct rt_rpmsg_endpoint_info *info, rt_rpmsg_rx_callback rx_cb) +{ + rt_err_t err; + rt_ubase_t level; + struct rt_rpmsg_endpoint *ept; + + RT_ASSERT(rdev != RT_NULL); + RT_ASSERT(info != RT_NULL); + + ept = rt_calloc(1, sizeof(*ept)); + + if (!ept) + { + return rt_err_ptr(-RT_ENOMEM); + } + ept->rdev = rdev; + + rt_memcpy(&ept->info, info, sizeof(ept->info)); + ept->rx_callback = rx_cb ? 
: rt_container_of(rdev->parent.drv, + struct rt_rpmsg_driver, parent)->rx_callback; + + RT_ASSERT(ept->rx_callback != RT_NULL); + + err = rdev->ops->create_endpoint(rdev, ept, info); + + if (err) + { + rt_free(ept); + return rt_err_ptr(err); + } + + rt_spin_lock_init(&ept->lock); + + rt_list_init(&ept->list); + level = rt_spin_lock_irqsave(&rdev->lock); + rt_list_insert_before(&rdev->ept_nodes, &ept->list); + rt_spin_unlock_irqrestore(&rdev->lock, level); + + return ept; +} + +rt_err_t rt_rpmsg_destroy_endpoint(struct rt_rpmsg_device *rdev, + struct rt_rpmsg_endpoint *ept) +{ + rt_err_t err; + rt_ubase_t level; + + RT_ASSERT(rdev != RT_NULL); + RT_ASSERT(ept != RT_NULL); + + err = rdev->ops->destroy_endpoint(rdev, ept); + + if (err) + { + return err; + } + + level = rt_spin_lock_irqsave(&rdev->lock); + rt_list_remove(&ept->list); + rt_spin_unlock_irqrestore(&rdev->lock, level); + + rt_free(ept); + + return RT_EOK; +} + +struct rt_rpmsg_endpoint *rt_rpmsg_find_endpoint(struct rt_rpmsg_device *rdev, + struct rt_rpmsg_endpoint_info *info) +{ + rt_ubase_t level; + struct rt_rpmsg_endpoint *ept = RT_NULL, *ept_tmp; + + RT_ASSERT(rdev != RT_NULL); + RT_ASSERT(info != RT_NULL); + + level = rt_spin_lock_irqsave(&rdev->lock); + + rt_list_for_each_entry(ept_tmp, &rdev->ept_nodes, list) + { + if (info->src != RT_RPMSG_ADDR_ANY && info->src != ept_tmp->info.src) + { + continue; + } + + if (info->dst != RT_RPMSG_ADDR_ANY && info->dst != ept_tmp->info.dst) + { + continue; + } + + if (info->name[0] && + rt_strncmp(info->name, ept_tmp->info.name, RT_RPMSG_NAME_SIZE)) + { + continue; + } + + ept = ept_tmp; + break; + } + + rt_spin_unlock_irqrestore(&rdev->lock, level); + + return ept; +} + +rt_err_t rt_rpmsg_send(struct rt_rpmsg_endpoint *ept, + const void *data, rt_size_t len) +{ + RT_ASSERT(ept != RT_NULL); + + return rt_rpmsg_sendto(ept, ept->info.dst, data, len); +} + +rt_err_t rt_rpmsg_sendto(struct rt_rpmsg_endpoint *ept, rt_uint32_t dst, + const void *data, rt_size_t len) +{ 
+ RT_ASSERT(ept != RT_NULL); + + return rt_rpmsg_sendto_wait(ept, dst, data, len, 0); +} + +rt_err_t rt_rpmsg_send_wait(struct rt_rpmsg_endpoint *ept, + const void *data, rt_size_t len, rt_int32_t timeout) +{ + RT_ASSERT(ept != RT_NULL); + + return rt_rpmsg_sendto_wait(ept, ept->info.dst, data, len, timeout); +} + +rt_err_t rt_rpmsg_sendto_wait(struct rt_rpmsg_endpoint *ept, rt_uint32_t dst, + const void *data, rt_size_t len, rt_int32_t timeout) +{ + rt_err_t err; + struct rt_rpmsg_device *rdev; + + RT_ASSERT(ept != RT_NULL); + rdev = ept->rdev; + + rt_hw_spin_lock(&ept->lock.lock); + + err = rdev->ops->send(rdev, ept->info.src, dst, data, len, timeout); + + rt_hw_spin_unlock(&ept->lock.lock); + + return err; +} + +static struct rt_bus rpmsg_bus; + +rt_err_t rt_rpmsg_driver_register(struct rt_rpmsg_driver *rdrv) +{ + RT_ASSERT(rdrv != RT_NULL); + + rdrv->parent.bus = &rpmsg_bus; + + return rt_driver_register(&rdrv->parent); +} + +rt_err_t rt_rpmsg_device_register(struct rt_rpmsg_device *rdev) +{ + rt_err_t err; + + if ((err = rt_dm_dev_set_name_auto(&rdev->parent, rdev->id.name)) < 0) + { + return err; + } + + rt_list_init(&rdev->ept_nodes); + rt_spin_lock_init(&rdev->lock); + + return rt_bus_add_device(&rpmsg_bus, &rdev->parent); +} + +static rt_bool_t rpmsg_match(rt_driver_t drv, rt_device_t dev) +{ + const struct rt_rpmsg_device_id *id; + struct rt_rpmsg_driver *rdrv = rt_container_of(drv, struct rt_rpmsg_driver, parent); + struct rt_rpmsg_device *rdev = rt_container_of(dev, struct rt_rpmsg_device, parent); + + for (id = rdrv->ids; id->name[0]; ++id) + { + if (!rt_strncmp(id->name, rdev->id.name, RT_RPMSG_NAME_SIZE)) + { + rdev->id.data = id->data; + + return RT_TRUE; + } + } + + return RT_FALSE; +} + +static rt_err_t rpmsg_probe(rt_device_t dev) +{ + rt_err_t err; + struct rt_rpmsg_driver *rdrv = rt_container_of(dev->drv, struct rt_rpmsg_driver, parent); + struct rt_rpmsg_device *rdev = rt_container_of(dev, struct rt_rpmsg_device, parent); + + err = 
rt_dm_power_domain_attach(dev, RT_TRUE); + + if (err && err != -RT_EEMPTY) + { + LOG_E("Attach power domain error = %s in device %s", + rt_dm_dev_get_name(dev), rt_strerror(err)); + + return err; + } + + err = rdrv->probe(rdev); + + if (err) + { + rt_dm_power_domain_detach(dev, RT_TRUE); + } + + return err; +} + +static rt_err_t rpmsg_remove(rt_device_t dev) +{ + rt_ubase_t level; + struct rt_rpmsg_endpoint *ept, *ept_next; + struct rt_rpmsg_driver *rdrv = rt_container_of(dev->drv, struct rt_rpmsg_driver, parent); + struct rt_rpmsg_device *rdev = rt_container_of(dev, struct rt_rpmsg_device, parent); + + level = rt_spin_lock_irqsave(&rdev->lock); + + rt_list_for_each_entry_safe(ept, ept_next, &rdev->ept_nodes, list) + { + rt_spin_unlock_irqrestore(&rdev->lock, level); + + rt_rpmsg_destroy_endpoint(rdev, ept); + + level = rt_spin_lock_irqsave(&rdev->lock); + } + + rt_spin_unlock_irqrestore(&rdev->lock, level); + + if (rdrv && rdrv->remove) + { + rdrv->remove(rdev); + } + + rt_dm_power_domain_detach(dev, RT_TRUE); + + return RT_EOK; +} + +static struct rt_bus rpmsg_bus = +{ + .name = "rpmsg", + .match = rpmsg_match, + .probe = rpmsg_probe, + .remove = rpmsg_remove, +}; + +static int rpmsg_bus_init(void) +{ + rt_bus_register(&rpmsg_bus); + + return 0; +} +INIT_CORE_EXPORT(rpmsg_bus_init); diff --git a/components/drivers/rpmsg/rpmsg_char.c b/components/drivers/rpmsg/rpmsg_char.c new file mode 100755 index 00000000000..1434792c6c7 --- /dev/null +++ b/components/drivers/rpmsg/rpmsg_char.c @@ -0,0 +1,398 @@ +/* + * Copyright (c) 2006-2023, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2023-02-25 GuEe-GUI the first version + */ + +#include +#include + +#define DBG_TAG "rpmsg.char" +#define DBG_LVL DBG_INFO +#include + +struct rpmsg_char_ctrl; + +struct rpmsg_char +{ + struct rt_device parent; + rt_list_t list; + + struct rpmsg_char_ctrl *rchc; + struct rt_rpmsg_endpoint *ept; + + rt_bool_t is_overwrite; 
+ struct rt_ringbuffer msg_ring; + rt_uint8_t msg_pool[RT_RPMSG_CHAR_MSG_SIZE_MAX * RT_RPMSG_CHAR_MSG_MAX]; +}; + +struct rpmsg_char_ctrl +{ + struct rt_device parent; + + struct rt_rpmsg_device *rdev; + + rt_list_t ept_nodes; + rt_list_t del_ept_nodes; + struct rt_spinlock lock; + struct rt_work del_ept_work; +}; + +#define raw_to_rpmsg_char(raw) rt_container_of(raw, struct rpmsg_char, parent) +#define raw_to_rpmsg_char_ctrl(raw) rt_container_of(raw, struct rpmsg_char_ctrl, parent) + +static struct rt_dm_ida rpmsg_ept_ida = RT_DM_IDA_INIT(RPMSG_EPT); +static struct rt_dm_ida rpmsg_char_ida = RT_DM_IDA_INIT(RPMSG_CHAR); + +static rt_err_t rpmsg_char_open(rt_device_t dev, rt_uint16_t oflag) +{ + rt_ubase_t level; + rt_err_t err = RT_EOK; + struct rpmsg_char_ctrl *rchc; + struct rpmsg_char *this_rch = raw_to_rpmsg_char(dev), *rch, *rch_next; + + rchc = this_rch->rchc; + + level = rt_spin_lock_irqsave(&rchc->lock); + + rt_list_for_each_entry_safe(rch, rch_next, &rchc->del_ept_nodes, list) + { + if (rch == this_rch) + { + /* It's been cleaned. Don't open it. */ + err = -RT_EIO; + break; + } + } + + rt_spin_unlock_irqrestore(&rchc->lock, level); + + return err; +} + +static rt_ssize_t rpmsg_char_read(rt_device_t dev, + rt_off_t pos, void *buffer, rt_size_t size) +{ + struct rpmsg_char *rch = raw_to_rpmsg_char(dev); + + return rt_ringbuffer_get(&rch->msg_ring, buffer, size); +} + +static rt_ssize_t rpmsg_char_write(rt_device_t dev, + rt_off_t pos, const void *buffer, rt_size_t size) +{ + struct rpmsg_char *rch = raw_to_rpmsg_char(dev); + + return rt_rpmsg_send(rch->ept, buffer, size) ? 
: size; +} + +static rt_err_t rpmsg_char_control(rt_device_t dev, int cmd, void *args) +{ + struct rpmsg_char *rch = raw_to_rpmsg_char(dev); + + if (cmd == RT_DEVICE_CTRL_RPMSG_DESTROY_EPT) + { + if (dev->ref_count == 1) + { + rt_ubase_t level; + + level = rt_spin_lock_irqsave(&rch->rchc->lock); + rt_list_remove(&rch->list); + rt_list_insert_before(&rch->rchc->del_ept_nodes, &rch->list); + rt_spin_unlock_irqrestore(&rch->rchc->lock, level); + + rt_work_submit(&rch->rchc->del_ept_work, + RT_SCHED_PRIV(rt_thread_self()).remaining_tick); + } + + return RT_EOK; + } + + if (cmd == RT_DEVICE_CTRL_RPMSG_DATA_OVERWRITE) + { + rch->is_overwrite = !!args; + + return RT_EOK; + } + + return -RT_EINVAL; +} + +#ifdef RT_USING_DEVICE_OPS +const static struct rt_device_ops rpmsg_char_ops = +{ + .open = rpmsg_char_open, + .read = rpmsg_char_read, + .write = rpmsg_char_write, + .control = rpmsg_char_control, +}; +#endif + +static rt_err_t rpmsg_char_rx_callback(struct rt_rpmsg_device *rdev, + rt_uint32_t src, void *data, rt_size_t len) +{ + rt_size_t res_size; + struct rpmsg_char *rch; + struct rt_rpmsg_endpoint *ept; + struct rt_rpmsg_endpoint_info info; + + RT_ASSERT(len <= RT_RPMSG_CHAR_MSG_SIZE_MAX); + + info.src = RT_RPMSG_ADDR_ANY; + info.dst = src; + info.name[0] = '\0'; + ept = rt_rpmsg_find_endpoint(rdev, &info); + + if (ept) + { + rch = ept->priv; + + if (rch->is_overwrite) + { + res_size = rt_ringbuffer_put_force(&rch->msg_ring, data, len); + } + else + { + res_size = rt_ringbuffer_put(&rch->msg_ring, data, len); + } + } + else + { + return -RT_EINVAL; + } + + return res_size ? 
RT_EOK : -RT_ENOMEM; +} + +static void rpmsg_char_ctrl_del_ept_work(struct rt_work *work, void *work_data) +{ + rt_ubase_t level; + rt_size_t clean_count = 0; + struct rpmsg_char *rch, *rch_next; + struct rpmsg_char_ctrl *rchc = work_data; + + level = rt_spin_lock_irqsave(&rchc->lock); + + rt_list_for_each_entry_safe(rch, rch_next, &rchc->del_ept_nodes, list) + { + if (rch->parent.open_flag == RT_DEVICE_OFLAG_CLOSE) + { + rt_list_remove(&rch->list); + + rt_spin_unlock_irqrestore(&rchc->lock, level); + + rt_rpmsg_destroy_endpoint(rchc->rdev, rch->ept); + + rt_dm_ida_free(&rpmsg_ept_ida, rch->parent.device_id); + + rt_device_unregister(&rch->parent); + rt_free(rch); + + level = rt_spin_lock_irqsave(&rchc->lock); + + ++clean_count; + } + } + + rt_spin_unlock_irqrestore(&rchc->lock, level); + + if (!clean_count) + { + /* Try again */ + rt_work_submit(&rchc->del_ept_work, RT_TICK_PER_SECOND); + } +} + +static rt_err_t rpmsg_char_ctrl_control(rt_device_t dev, int cmd, void *args) +{ + struct rpmsg_char_ctrl *rchc = raw_to_rpmsg_char_ctrl(dev); + + if (cmd == RT_DEVICE_CTRL_RPMSG_CREATE_EPT && args) + { + int device_id; + rt_ubase_t level; + struct rpmsg_char *rch; + struct rt_rpmsg_endpoint *ept; + struct rt_rpmsg_endpoint_info *info = args; + + if (!info->name[0]) + { + rt_strncpy(info->name, "rpmsg-raw", RT_RPMSG_NAME_SIZE); + } + + ept = rt_rpmsg_create_endpoint(rchc->rdev, info, &rpmsg_char_rx_callback); + + if (rt_is_err(ept)) + { + return rt_ptr_err(ept); + } + + rch = rt_calloc(1, sizeof(*rch)); + + if (!rch) + { + rt_rpmsg_destroy_endpoint(rchc->rdev, ept); + return -RT_ENOMEM; + } + + if ((device_id = rt_dm_ida_alloc(&rpmsg_ept_ida)) < 0) + { + rt_free(rch); + rt_rpmsg_destroy_endpoint(rchc->rdev, ept); + return -RT_EFULL; + } + + ept->priv = rch; + rch->ept = ept; + rch->rchc = rchc; + + rch->parent.type = RT_Device_Class_Char; + #ifdef RT_USING_DEVICE_OPS + rch->parent.ops = &rpmsg_char_ops; + #else + rch->parent.read = rpmsg_char_read; + rch->parent.write = 
rpmsg_char_write; + rch->parent.control = rpmsg_char_control; + #endif + rch->parent.master_id = rpmsg_ept_ida.master_id; + rch->parent.device_id = device_id; + + rt_ringbuffer_init(&rch->msg_ring, rch->msg_pool, sizeof(rch->msg_pool)); + rt_dm_dev_set_name(&rch->parent, "rpmsg_%ux%u", ept->info.src, ept->info.dst); + + rt_list_init(&rch->list); + + level = rt_spin_lock_irqsave(&rchc->lock); + rt_list_insert_before(&rchc->ept_nodes, &rch->list); + rt_spin_unlock_irqrestore(&rchc->lock, level); + + rt_device_register(&rch->parent, rt_dm_dev_get_name(&rch->parent), + RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE); + + return RT_EOK; + } + + return -RT_EINVAL; +} + +#ifdef RT_USING_DEVICE_OPS +const static struct rt_device_ops rpmsg_char_ctrl_ops = +{ + .control = rpmsg_char_ctrl_control, +}; +#endif + +static rt_err_t rpmsg_char_probe(struct rt_rpmsg_device *rdev) +{ + int device_id; + struct rpmsg_char_ctrl *rchc = rt_calloc(1, sizeof(*rchc)); + + if (!rchc) + { + return -RT_ENOMEM; + } + + if ((device_id = rt_dm_ida_alloc(&rpmsg_char_ida)) < 0) + { + return -RT_EFULL; + } + + rchc->rdev = rdev; + rdev->parent.user_data = rchc; + + rt_list_init(&rchc->ept_nodes); + rt_list_init(&rchc->del_ept_nodes); + rt_spin_lock_init(&rchc->lock); + rt_work_init(&rchc->del_ept_work, rpmsg_char_ctrl_del_ept_work, rchc); + + rt_dm_dev_set_name(&rchc->parent, "rpmsg_char%u", device_id); + + rchc->parent.type = RT_Device_Class_Char; +#ifdef RT_USING_DEVICE_OPS + rchc->parent.ops = &rpmsg_char_ctrl_ops; +#else + rchc->parent.control = rpmsg_char_ctrl_control; +#endif + rchc->parent.master_id = rpmsg_char_ida.master_id; + rchc->parent.device_id = device_id; + + rt_device_register(&rchc->parent, rt_dm_dev_get_name(&rchc->parent), + RT_DEVICE_FLAG_RDWR | RT_DEVICE_FLAG_REMOVABLE); + + return RT_EOK; +} + +static rt_err_t rpmsg_char_remove(struct rt_rpmsg_device *rdev) +{ + rt_ubase_t level; + struct rpmsg_char *rch, *rch_next; + struct rpmsg_char_ctrl *rchc = rdev->parent.user_data; + + 
/* Detach and destroy every live endpoint device. */
    level = rt_spin_lock_irqsave(&rchc->lock);

    rt_list_for_each_entry_safe(rch, rch_next, &rchc->ept_nodes, list)
    {
        rt_list_remove(&rch->list);

        /* Drop the lock for the heavy teardown calls below.
         * NOTE(review): rch_next was captured before unlocking; assumes no
         * concurrent list mutation during remove() — confirm. */
        rt_spin_unlock_irqrestore(&rchc->lock, level);

        rt_rpmsg_destroy_endpoint(rchc->rdev, rch->ept);

        rt_dm_ida_free(&rpmsg_ept_ida, rch->parent.device_id);

        rt_device_unregister(&rch->parent);
        rt_free(rch);

        level = rt_spin_lock_irqsave(&rchc->lock);
    }

    /* Same teardown for endpoints already queued for deferred deletion. */
    rt_list_for_each_entry_safe(rch, rch_next, &rchc->del_ept_nodes, list)
    {
        rt_list_remove(&rch->list);

        rt_spin_unlock_irqrestore(&rchc->lock, level);

        rt_rpmsg_destroy_endpoint(rchc->rdev, rch->ept);

        rt_dm_ida_free(&rpmsg_ept_ida, rch->parent.device_id);

        rt_device_unregister(&rch->parent);
        rt_free(rch);

        level = rt_spin_lock_irqsave(&rchc->lock);
    }

    rt_spin_unlock_irqrestore(&rchc->lock, level);

    /* Finally release the control device itself. */
    rt_dm_ida_free(&rpmsg_char_ida, rchc->parent.device_id);

    rt_device_unregister(&rchc->parent);

    rt_free(rchc);

    return RT_EOK;
}

/* Device-id table: services this driver binds to. */
static struct rt_rpmsg_device_id rpmsg_char_ids[] =
{
    { .name = "rpmsg-raw" },
    { .name = "rpmsg-char" },
    { /* sentinel */ }
};

static struct rt_rpmsg_driver rpmsg_char_driver =
{
    .parent.parent =
    {
        .name = "rpmsg-char",
    },
    .ids = rpmsg_char_ids,

    .probe = rpmsg_char_probe,
    .remove = rpmsg_char_remove,
};
RT_RPMSG_DRIVER_EXPORT(rpmsg_char_driver);
diff --git a/components/drivers/rpmsg/rpmsg_ns.c b/components/drivers/rpmsg/rpmsg_ns.c
new file mode 100755
index 00000000000..9383cb2fa76
--- /dev/null
+++ b/components/drivers/rpmsg/rpmsg_ns.c
@@ -0,0 +1,122 @@
/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-02-25     GuEe-GUI     the first version
 */

#include
#include

#define DBG_TAG "rpmsg.ns"
#define DBG_LVL DBG_INFO
#include

/*
 * Used when rt_rpmsg_create_endpoint(..., RT_NULL) (e.g.
remote NS announce); + * application traffic should be bound via a higher layer later. + */ +static rt_err_t rpmsg_ns_remote_default_rx(struct rt_rpmsg_device *rdev, + rt_uint32_t src, void *data, rt_size_t len) +{ + LOG_D("%s: remote endpoint rx (no user cb), src=%u len=%u", rt_dm_dev_get_name(&rdev->parent), src, len); + + return RT_EOK; +} + +static rt_err_t rpmsg_ns_rx_callback(struct rt_rpmsg_device *rdev, + rt_uint32_t src, void *data, rt_size_t len) +{ + rt_err_t err = RT_EOK; + struct rt_rpmsg_ns_msg *msg = data; + struct rt_rpmsg_endpoint *ept; + struct rt_rpmsg_endpoint_info info; + + if (len != sizeof(*msg)) + { + LOG_E("Invalid MSG size = %d", len); + + return -RT_EINVAL; + } + + /* Fixup the name */ + msg->name[RT_RPMSG_NAME_SIZE - 1] = '\0'; + rt_strncpy(info.name, msg->name, RT_RPMSG_NAME_SIZE); + info.src = RT_RPMSG_ADDR_ANY; + info.dst = rt_le32_to_cpu(msg->addr); + + LOG_D("%s: name: %s, src: %u, dst: %u", rt_dm_dev_get_name(&rdev->parent), + info.name, info.src, info.dst); + + if (rt_le32_to_cpu(msg->flags) & RT_RPMSG_NS_DESTROY) + { + ept = rt_rpmsg_find_endpoint(rdev, &info); + + if (ept) + { + err = rt_rpmsg_destroy_endpoint(rdev, ept); + } + else + { + err = -RT_EEMPTY; + } + } + else if (rt_le32_to_cpu(msg->flags) == RT_RPMSG_NS_CREATE) + { + ept = rt_rpmsg_create_endpoint(rdev, &info, RT_NULL); + + if (rt_is_err(ept)) + { + err = rt_ptr_err(ept); + } + } + else + { + LOG_E("Unsupported flags = %x", rt_le32_to_cpu(msg->flags)); + } + + if (err) + { + LOG_E("%s: name = %s, addr = %x flags = %d error = %s", + rt_dm_dev_get_name(&rdev->parent), + msg->name, msg->addr, msg->flags, rt_strerror(err)); + } + + return err; +} + +static rt_err_t rpmsg_ns_probe(struct rt_rpmsg_device *rdev) +{ + struct rt_rpmsg_endpoint *ep; + struct rt_rpmsg_endpoint_info info; + + rt_strncpy(info.name, "name-service", RT_RPMSG_NAME_SIZE); + info.src = RT_RPMSG_NS_ADDR; + info.dst = RT_RPMSG_NS_ADDR; + + ep = rt_rpmsg_create_endpoint(rdev, &info, 
&rpmsg_ns_rx_callback); + + return rt_is_err(ep) ? rt_ptr_err(ep) : RT_EOK; +} + +static struct rt_rpmsg_device_id rpmsg_ns_ids[] = +{ + { .name = "rpmsg-name-service" }, + { /* sentinel */ } +}; + +static struct rt_rpmsg_driver rpmsg_ns_driver = +{ + .parent.parent = + { + .name = "rpmsg-ns", + }, + .ids = rpmsg_ns_ids, + + .probe = rpmsg_ns_probe, + .rx_callback = rpmsg_ns_remote_default_rx, +}; +RT_RPMSG_DRIVER_EXPORT(rpmsg_ns_driver);