/*
 * Copyright (c) 2021, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "rpmsg_backend.h"

#include <zephyr/kernel.h>
#include <zephyr/drivers/ipm.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/logging/log.h>

#include <openamp/open_amp.h>

#define LOG_MODULE_NAME rpmsg_backend
LOG_MODULE_REGISTER(LOG_MODULE_NAME, CONFIG_RPMSG_SERVICE_LOG_LEVEL);

/* Configuration defines */
#if !DT_HAS_CHOSEN(zephyr_ipc_shm)
#error "Module requires definition of shared memory for rpmsg"
#endif

#define MASTER IS_ENABLED(CONFIG_RPMSG_SERVICE_MODE_MASTER)

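/*
 * On an IPM notification, the master processes virtqueue 0 and the remote
 * processes virtqueue 1 (see ipm_callback_process()).
 */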
#if MASTER
#define VIRTQUEUE_ID 0
#define RPMSG_ROLE RPMSG_HOST
#else
#define VIRTQUEUE_ID 1
#define RPMSG_ROLE RPMSG_REMOTE
#endif
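/*
 * Vring placement within shared memory. VDEV_START_ADDR, SHM_SIZE and
 * VDEV_STATUS_SIZE are defined in rpmsg_backend.h: the RX vring sits
 * VDEV_STATUS_SIZE bytes below the end of the vdev area, the TX vring
 * directly at its end.
 */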
#define VRING_COUNT 2
#define VRING_RX_ADDRESS (VDEV_START_ADDR + SHM_SIZE - VDEV_STATUS_SIZE)
#define VRING_TX_ADDRESS (VDEV_START_ADDR + SHM_SIZE)
#define VRING_ALIGNMENT 4
#define VRING_SIZE 16

#define IPM_WORK_QUEUE_STACK_SIZE CONFIG_RPMSG_SERVICE_WORK_QUEUE_STACK_SIZE
#define IPM_WORK_QUEUE_PRIORITY K_HIGHEST_APPLICATION_THREAD_PRIO

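/*
 * Virtqueue processing is deferred to a dedicated work queue running at the
 * highest application priority, so notifications are handled promptly even
 * when the system work queue is busy (see the TODO in ipm_callback()).
 */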
K_THREAD_STACK_DEFINE(ipm_stack_area, IPM_WORK_QUEUE_STACK_SIZE);

struct k_work_q ipm_work_q;

/* End of configuration defines */

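/* IPM device handles, resolved at build time from the devicetree chosen nodes. */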
#if defined(CONFIG_RPMSG_SERVICE_DUAL_IPM_SUPPORT)
static const struct device *const ipm_tx_handle =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_ipc_tx));
static const struct device *const ipm_rx_handle =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_ipc_rx));
#elif defined(CONFIG_RPMSG_SERVICE_SINGLE_IPM_SUPPORT)
static const struct device *const ipm_handle =
	DEVICE_DT_GET(DT_CHOSEN(zephyr_ipc));
#endif

static metal_phys_addr_t shm_physmap[] = { SHM_START_ADDR };
static struct metal_io_region shm_io;

static struct virtio_vring_info rvrings[2] = {
	[0] = {
		.info.align = VRING_ALIGNMENT,
	},
	[1] = {
		.info.align = VRING_ALIGNMENT,
	},
};
static struct virtqueue *vqueue[2];

static struct k_work ipm_work;

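/*
 * The virtio status byte lives at VDEV_STATUS_ADDR in shared memory: the
 * master publishes its status there (it reports DRIVER_OK for itself), and
 * the remote only reads it back.
 */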
static unsigned char ipc_virtio_get_status(struct virtio_device *vdev)
{
#if MASTER
	return VIRTIO_CONFIG_STATUS_DRIVER_OK;
#else
	return sys_read8(VDEV_STATUS_ADDR);
#endif
}

static void ipc_virtio_set_status(struct virtio_device *vdev, unsigned char status)
{
	sys_write8(status, VDEV_STATUS_ADDR);
}

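/* Only the rpmsg name service announcement feature is offered. */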
static uint32_t ipc_virtio_get_features(struct virtio_device *vdev)
{
	return BIT(VIRTIO_RPMSG_F_NS);
}

static void ipc_virtio_set_features(struct virtio_device *vdev, uint32_t features)
{
}

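/*
 * Ring the peer's doorbell over IPM. The payload is ignored on the receiving
 * side (see ipm_callback()), but some IPM drivers cannot send NULL data,
 * hence the per-platform variants below.
 */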
static void ipc_virtio_notify(struct virtqueue *vq)
{
	int status;

#if defined(CONFIG_RPMSG_SERVICE_DUAL_IPM_SUPPORT)
	status = ipm_send(ipm_tx_handle, 0, 0, NULL, 0);
#elif defined(CONFIG_RPMSG_SERVICE_SINGLE_IPM_SUPPORT)

#if defined(CONFIG_SOC_MPS2_AN521) || \
	defined(CONFIG_SOC_V2M_MUSCA_B1)
	uint32_t current_core = sse_200_platform_get_cpu_id();

	status = ipm_send(ipm_handle, 0, current_core ? 0 : 1, 0, 1);
#elif defined(CONFIG_IPM_STM32_HSEM)
	/* No data transfer, only doorbell. */
	status = ipm_send(ipm_handle, 0, 0, NULL, 0);
#else
	/* The IPM interface is unclear on whether or not ipm_send
	 * can be called with NULL as data, so some drivers may
	 * misbehave if you do. To be safe, we always send some
	 * dummy data, unless the IPM driver cannot transfer data.
	 * Ref: #68741
	 */
	uint32_t dummy_data = 0x55005500;

	status = ipm_send(ipm_handle, 0, 0, &dummy_data, sizeof(dummy_data));
#endif /* #if defined(CONFIG_SOC_MPS2_AN521) */

#endif

	if (status != 0) {
		LOG_ERR("ipm_send failed to notify: %d", status);
	}
}

const struct virtio_dispatch dispatch = {
	.get_status = ipc_virtio_get_status,
	.set_status = ipc_virtio_set_status,
	.get_features = ipc_virtio_get_features,
	.set_features = ipc_virtio_set_features,
	.notify = ipc_virtio_notify,
};

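/* Work item handler: process this side's virtqueue outside interrupt context. */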
static void ipm_callback_process(struct k_work *work)
{
	virtqueue_notification(vqueue[VIRTQUEUE_ID]);
}

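/*
 * Called by the IPM driver (typically from interrupt context); defer all
 * virtqueue processing to the work queue.
 */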
static void ipm_callback(const struct device *dev,
			 void *context, uint32_t id,
			 volatile void *data)
{
	(void)dev;

	LOG_DBG("Got callback of id %u", id);
	/* TODO: A separate workqueue is only needed for the
	 * serialization master (app core).
	 *
	 * Use the sysworkq to reduce the memory footprint of the
	 * serialization slave (net core).
	 */
	k_work_submit_to_queue(&ipm_work_q, &ipm_work);
}

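/*
 * Bring up the backend: start the IPM work queue, initialize libmetal and
 * the shared-memory I/O region, enable the IPM device(s), and allocate the
 * two virtqueues backing the rpmsg vdev.
 */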
int rpmsg_backend_init(struct metal_io_region **io, struct virtio_device *vdev)
{
	int32_t err;
	struct metal_init_params metal_params = METAL_INIT_DEFAULTS;

	/* Start IPM workqueue */
	k_work_queue_start(&ipm_work_q, ipm_stack_area,
			   K_THREAD_STACK_SIZEOF(ipm_stack_area),
			   IPM_WORK_QUEUE_PRIORITY, NULL);
	k_thread_name_set(&ipm_work_q.thread, "ipm_work_q");

	/* Setup IPM workqueue item */
	k_work_init(&ipm_work, ipm_callback_process);

	/* Libmetal setup */
	err = metal_init(&metal_params);
	if (err) {
		LOG_ERR("metal_init: failed - error code %d", err);
		return err;
	}

	/* Declare the shared memory region; a page shift of -1 makes the
	 * page size equal the region size, i.e. one flat page.
	 */
	metal_io_init(&shm_io, (void *)SHM_START_ADDR, shm_physmap, SHM_SIZE, -1, 0, NULL);
	*io = &shm_io;

	/* IPM setup */
#if defined(CONFIG_RPMSG_SERVICE_DUAL_IPM_SUPPORT)
	if (!device_is_ready(ipm_tx_handle)) {
		LOG_ERR("IPM TX device is not ready");
		return -ENODEV;
	}

	if (!device_is_ready(ipm_rx_handle)) {
		LOG_ERR("IPM RX device is not ready");
		return -ENODEV;
	}

	ipm_register_callback(ipm_rx_handle, ipm_callback, NULL);

	err = ipm_set_enabled(ipm_rx_handle, 1);
	if (err != 0) {
		LOG_ERR("Could not enable IPM interrupts and callbacks for RX");
		return err;
	}

#elif defined(CONFIG_RPMSG_SERVICE_SINGLE_IPM_SUPPORT)
	if (!device_is_ready(ipm_handle)) {
		LOG_ERR("IPM device is not ready");
		return -ENODEV;
	}

	ipm_register_callback(ipm_handle, ipm_callback, NULL);

	err = ipm_set_enabled(ipm_handle, 1);
	if (err != 0) {
		LOG_ERR("Could not enable IPM interrupts and callbacks");
		return err;
	}
#endif

	/* Virtqueue setup */
	vqueue[0] = virtqueue_allocate(VRING_SIZE);
	if (!vqueue[0]) {
		LOG_ERR("virtqueue_allocate failed to alloc vqueue[0]");
		return -ENOMEM;
	}

	vqueue[1] = virtqueue_allocate(VRING_SIZE);
	if (!vqueue[1]) {
		LOG_ERR("virtqueue_allocate failed to alloc vqueue[1]");
		return -ENOMEM;
	}

	rvrings[0].io = *io;
	rvrings[0].info.vaddr = (void *)VRING_TX_ADDRESS;
	rvrings[0].info.num_descs = VRING_SIZE;
	rvrings[0].info.align = VRING_ALIGNMENT;
	rvrings[0].vq = vqueue[0];

	rvrings[1].io = *io;
	rvrings[1].info.vaddr = (void *)VRING_RX_ADDRESS;
	rvrings[1].info.num_descs = VRING_SIZE;
	rvrings[1].info.align = VRING_ALIGNMENT;
	rvrings[1].vq = vqueue[1];

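	/* Hand the rings and the dispatch ops to the caller's virtio device. */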
	vdev->role = RPMSG_ROLE;
	vdev->vrings_num = VRING_COUNT;
	vdev->func = &dispatch;
	vdev->vrings_info = &rvrings[0];

	return 0;
}

#if MASTER
/* Make sure we clear out the status flag very early (before we bring up the
 * secondary core) so the secondary core sees the proper status.
 */
static int init_status_flag(void)
{
	ipc_virtio_set_status(NULL, 0);

	return 0;
}

SYS_INIT(init_status_flag, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);
#endif /* MASTER */
