/*
 * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/sys/atomic.h>

#include <zephyr/ipc/ipc_service_backend.h>
#include <zephyr/ipc/ipc_static_vrings.h>
#include <zephyr/ipc/ipc_rpmsg.h>

#include <zephyr/drivers/mbox.h>
#include <zephyr/dt-bindings/ipc_service/static_vrings.h>

#include "ipc_rpmsg_static_vrings.h"

#define DT_DRV_COMPAT zephyr_ipc_openamp_static_vrings

#define NUM_INSTANCES DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)

#define WQ_STACK_SIZE CONFIG_IPC_SERVICE_BACKEND_RPMSG_WQ_STACK_SIZE

#define STATE_READY   (0)
#define STATE_BUSY    (1)
#define STATE_INITED  (2)

#if defined(CONFIG_THREAD_MAX_NAME_LEN)
#define THREAD_MAX_NAME_LEN CONFIG_THREAD_MAX_NAME_LEN
#else
#define THREAD_MAX_NAME_LEN 1
#endif

K_THREAD_STACK_ARRAY_DEFINE(mbox_stack, NUM_INSTANCES, WQ_STACK_SIZE);

struct backend_data_t {
        /* RPMsg */
        struct ipc_rpmsg_instance rpmsg_inst;

        /* Static VRINGs */
        struct ipc_static_vrings vr;

        /* MBOX WQ */
        struct k_work mbox_work;
        struct k_work_q mbox_wq;

        /* General */
        unsigned int role;
        atomic_t state;

        /* TX buffer size */
        int tx_buffer_size;
};

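/*
 * Per-instance configuration, filled from devicetree by the
 * DEFINE_BACKEND_DEVICE() macro at the bottom of this file.
 */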
struct backend_config_t {
        unsigned int role;
        uintptr_t shm_addr;
        size_t shm_size;
        struct mbox_dt_spec mbox_tx;
        struct mbox_dt_spec mbox_rx;
        unsigned int wq_prio_type;
        unsigned int wq_prio;
        unsigned int id;
        unsigned int buffer_size;
};

static void rpmsg_service_unbind(struct rpmsg_endpoint *ep)
{
        rpmsg_destroy_ept(ep);
}

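/*
 * Endpoints live in a fixed-size table inside the RPMsg instance. A slot
 * whose name is the empty string is free, so looking up "" returns the
 * first available slot.
 */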
static struct ipc_rpmsg_ept *get_ept_slot_with_name(struct ipc_rpmsg_instance *rpmsg_inst,
                                                    const char *name)
{
        struct ipc_rpmsg_ept *rpmsg_ept;

        for (size_t i = 0; i < NUM_ENDPOINTS; i++) {
                rpmsg_ept = &rpmsg_inst->endpoint[i];

                if (strcmp(name, rpmsg_ept->name) == 0) {
                        return &rpmsg_inst->endpoint[i];
                }
        }

        return NULL;
}

static struct ipc_rpmsg_ept *get_available_ept_slot(struct ipc_rpmsg_instance *rpmsg_inst)
{
        return get_ept_slot_with_name(rpmsg_inst, "");
}

static bool check_endpoints_freed(struct ipc_rpmsg_instance *rpmsg_inst)
{
        struct ipc_rpmsg_ept *rpmsg_ept;

        for (size_t i = 0; i < NUM_ENDPOINTS; i++) {
                rpmsg_ept = &rpmsg_inst->endpoint[i];

                if (rpmsg_ept->bound) {
                        return false;
                }
        }

        return true;
}

/*
 * Returns:
 *  - true: when the endpoint was already cached / registered
 *  - false: when the endpoint was never registered before
 *
 * Returns in **rpmsg_ept:
 *  - The endpoint with the name *name if it exists
 *  - The first endpoint slot available when the endpoint with name *name does
 *    not exist
 *  - NULL in case of error
 */
static bool get_ept(struct ipc_rpmsg_instance *rpmsg_inst,
                    struct ipc_rpmsg_ept **rpmsg_ept, const char *name)
{
        struct ipc_rpmsg_ept *ept;

        ept = get_ept_slot_with_name(rpmsg_inst, name);
        if (ept != NULL) {
                (*rpmsg_ept) = ept;
                return true;
        }

        ept = get_available_ept_slot(rpmsg_inst);
        if (ept != NULL) {
                (*rpmsg_ept) = ept;
                return false;
        }

        (*rpmsg_ept) = NULL;

        return false;
}

static void advertise_ept(struct ipc_rpmsg_instance *rpmsg_inst, struct ipc_rpmsg_ept *rpmsg_ept,
                          const char *name, uint32_t dest)
{
        struct rpmsg_device *rdev;
        int err;

        rdev = rpmsg_virtio_get_rpmsg_device(&rpmsg_inst->rvdev);

        err = rpmsg_create_ept(&rpmsg_ept->ep, rdev, name, RPMSG_ADDR_ANY,
                               dest, rpmsg_inst->cb, rpmsg_service_unbind);
        if (err != 0) {
                return;
        }

        rpmsg_ept->bound = true;
        if (rpmsg_inst->bound_cb) {
                rpmsg_inst->bound_cb(rpmsg_ept);
        }
}

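/*
 * Name-service announcement callback, invoked by the RPMsg core when the
 * other side announces an endpoint. The owning instance is recovered by
 * walking back from the rpmsg_device through the enclosing structures
 * with CONTAINER_OF().
 */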
static void ns_bind_cb(struct rpmsg_device *rdev, const char *name, uint32_t dest)
{
        struct ipc_rpmsg_instance *rpmsg_inst;
        struct rpmsg_virtio_device *p_rvdev;
        struct ipc_rpmsg_ept *rpmsg_ept;
        bool ept_cached;

        p_rvdev = CONTAINER_OF(rdev, struct rpmsg_virtio_device, rdev);
        rpmsg_inst = CONTAINER_OF(p_rvdev->shpool, struct ipc_rpmsg_instance, shm_pool);

        if (name == NULL || name[0] == '\0') {
                return;
        }

        k_mutex_lock(&rpmsg_inst->mtx, K_FOREVER);
        ept_cached = get_ept(rpmsg_inst, &rpmsg_ept, name);

        if (rpmsg_ept == NULL) {
                k_mutex_unlock(&rpmsg_inst->mtx);
                return;
        }

        if (ept_cached) {
                /*
                 * The endpoint was already registered by the HOST core. The
                 * endpoint can now be advertised to the REMOTE core.
                 */
                k_mutex_unlock(&rpmsg_inst->mtx);
                advertise_ept(rpmsg_inst, rpmsg_ept, name, dest);
        } else {
                /*
                 * The endpoint is not registered yet. This happens when the
                 * REMOTE core registers the endpoint before the HOST has had
                 * the chance to register it. Cache it, saving the name and
                 * destination address, to be used by the next register_ept()
                 * call from the HOST core.
                 */
                strncpy(rpmsg_ept->name, name, sizeof(rpmsg_ept->name));
                rpmsg_ept->dest = dest;
                k_mutex_unlock(&rpmsg_inst->mtx);
        }
}

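/*
 * Completes the binding handshake: an empty message tells the other side
 * that the local endpoint is created and ready (see the diagram in
 * ept_cb() below), then the user's bound callback is fired.
 */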
static void bound_cb(struct ipc_rpmsg_ept *ept)
{
        rpmsg_send(&ept->ep, (uint8_t *)"", 0);

        if (ept->cb->bound) {
                ept->cb->bound(ept->priv);
        }
}

static int ept_cb(struct rpmsg_endpoint *ep, void *data, size_t len, uint32_t src, void *priv)
{
        struct ipc_rpmsg_ept *ept;

        ept = (struct ipc_rpmsg_ept *) priv;

        /*
         * The remote processor has sent an NS announcement. We use an empty
         * message to advise the remote side that a local endpoint has been
         * created and that the processor is ready to communicate with this
         * endpoint.
         *
         * ipc_rpmsg_register_ept
         *  rpmsg_send_ns_message --------------> ns_bind_cb
         *                                          bound_cb
         *            ept_cb <--------------- rpmsg_send [empty message]
         *           bound_cb
         */
        if (len == 0) {
                if (!ept->bound) {
                        ept->bound = true;
                        bound_cb(ept);
                }
                return RPMSG_SUCCESS;
        }

        if (ept->cb->received) {
                ept->cb->received(data, len, ept->priv);
        }

        return RPMSG_SUCCESS;
}

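/*
 * Carve the shared memory region into the layout shown in the diagram
 * below. Fails with -ENOMEM when the region is too small to hold even a
 * minimal number of descriptors for the configured buffer size.
 */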
static int vr_shm_configure(struct ipc_static_vrings *vr, const struct backend_config_t *conf)
{
        unsigned int num_desc;

        num_desc = optimal_num_desc(conf->shm_size, conf->buffer_size);
        if (num_desc == 0) {
                return -ENOMEM;
        }

        /*
         * conf->shm_addr  +--------------+  vr->status_reg_addr
         *                 |    STATUS    |
         *                 +--------------+  vr->shm_addr
         *                 |              |
         *                 |              |
         *                 |   RX BUFS    |
         *                 |              |
         *                 |              |
         *                 +--------------+
         *                 |              |
         *                 |              |
         *                 |   TX BUFS    |
         *                 |              |
         *                 |              |
         *                 +--------------+  vr->rx_addr (aligned)
         *                 |   RX VRING   |
         *                 +--------------+  vr->tx_addr (aligned)
         *                 |   TX VRING   |
         *                 +--------------+
         */

        vr->shm_addr = ROUND_UP(conf->shm_addr + VDEV_STATUS_SIZE, MEM_ALIGNMENT);
        vr->shm_size = shm_size(num_desc, conf->buffer_size);

        vr->rx_addr = vr->shm_addr + VRING_COUNT * vq_ring_size(num_desc, conf->buffer_size);
        vr->tx_addr = ROUND_UP(vr->rx_addr + vring_size(num_desc, MEM_ALIGNMENT),
                               MEM_ALIGNMENT);

        vr->status_reg_addr = conf->shm_addr;

        vr->vring_size = num_desc;

        return 0;
}

static void virtio_notify_cb(struct virtqueue *vq, void *priv)
{
        struct backend_config_t *conf = priv;

        if (conf->mbox_tx.dev) {
                mbox_send_dt(&conf->mbox_tx, NULL);
        }
}

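/*
 * Workqueue handler: runs outside ISR context and pushes the incoming
 * MBOX signal into the virtqueue the peer writes to. Which virtqueue
 * that is depends on the role of this core.
 */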
static void mbox_callback_process(struct k_work *item)
{
        struct backend_data_t *data;
        unsigned int vq_id;

        data = CONTAINER_OF(item, struct backend_data_t, mbox_work);
        vq_id = (data->role == ROLE_HOST) ? VIRTQUEUE_ID_HOST : VIRTQUEUE_ID_REMOTE;

        virtqueue_notification(data->vr.vq[vq_id]);
}

static void mbox_callback(const struct device *instance, uint32_t channel,
                          void *user_data, struct mbox_msg *msg_data)
{
        struct backend_data_t *data = user_data;

        k_work_submit_to_queue(&data->mbox_wq, &data->mbox_work);
}

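/*
 * Start the per-instance workqueue that defers RX MBOX signalling out of
 * ISR context, then register and enable the RX channel callback.
 */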
static int mbox_init(const struct device *instance)
{
        const struct backend_config_t *conf = instance->config;
        struct backend_data_t *data = instance->data;
        struct k_work_queue_config wq_cfg = {.name = instance->name};
        int prio, err;

        prio = (conf->wq_prio_type == PRIO_COOP) ? K_PRIO_COOP(conf->wq_prio) :
                                                   K_PRIO_PREEMPT(conf->wq_prio);

        k_work_queue_init(&data->mbox_wq);
        k_work_queue_start(&data->mbox_wq, mbox_stack[conf->id], WQ_STACK_SIZE, prio, &wq_cfg);

        if (IS_ENABLED(CONFIG_THREAD_NAME)) {
                char name[THREAD_MAX_NAME_LEN];

                snprintk(name, sizeof(name), "mbox_wq #%d", conf->id);
                k_thread_name_set(&data->mbox_wq.thread, name);
        }

        k_work_init(&data->mbox_work, mbox_callback_process);

        err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, data);
        if (err != 0) {
                return err;
        }

        return mbox_set_enabled_dt(&conf->mbox_rx, 1);
}

static int mbox_deinit(const struct device *instance)
{
        const struct backend_config_t *conf = instance->config;
        struct backend_data_t *data = instance->data;
        k_tid_t wq_thread;
        int err;

        err = mbox_set_enabled_dt(&conf->mbox_rx, 0);
        if (err != 0) {
                return err;
        }

        k_work_queue_drain(&data->mbox_wq, 1);

        wq_thread = k_work_queue_thread_get(&data->mbox_wq);
        k_thread_abort(wq_thread);

        return 0;
}

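/*
 * HOST-side registration. Two cases, mirroring ns_bind_cb(): if the
 * REMOTE already announced this name the slot is cached and the endpoint
 * can be advertised right away; otherwise the slot is cached here and
 * advertised later, from ns_bind_cb().
 */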
static struct ipc_rpmsg_ept *register_ept_on_host(struct ipc_rpmsg_instance *rpmsg_inst,
                                                  const struct ipc_ept_cfg *cfg)
{
        struct ipc_rpmsg_ept *rpmsg_ept;
        bool ept_cached;

        k_mutex_lock(&rpmsg_inst->mtx, K_FOREVER);

        ept_cached = get_ept(rpmsg_inst, &rpmsg_ept, cfg->name);
        if (rpmsg_ept == NULL) {
                k_mutex_unlock(&rpmsg_inst->mtx);
                return NULL;
        }

        rpmsg_ept->cb = &cfg->cb;
        rpmsg_ept->priv = cfg->priv;
        rpmsg_ept->bound = false;
        rpmsg_ept->ep.priv = rpmsg_ept;

        if (ept_cached) {
                /*
                 * The endpoint was cached in the NS bind callback. We can
                 * finally advertise it.
                 */
                k_mutex_unlock(&rpmsg_inst->mtx);
                advertise_ept(rpmsg_inst, rpmsg_ept, cfg->name, rpmsg_ept->dest);
        } else {
                /*
                 * There is no endpoint in the cache because the REMOTE has
                 * not registered the endpoint yet. Cache it.
                 */
                strncpy(rpmsg_ept->name, cfg->name, sizeof(rpmsg_ept->name));
                k_mutex_unlock(&rpmsg_inst->mtx);
        }

        return rpmsg_ept;
}

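/*
 * REMOTE-side registration: no caching dance is needed. The endpoint is
 * created immediately and a name-service announcement is sent to the
 * HOST by ipc_rpmsg_register_ept() (see the diagram in ept_cb()).
 */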
static struct ipc_rpmsg_ept *register_ept_on_remote(struct ipc_rpmsg_instance *rpmsg_inst,
                                                    const struct ipc_ept_cfg *cfg)
{
        struct ipc_rpmsg_ept *rpmsg_ept;
        int err;

        rpmsg_ept = get_available_ept_slot(rpmsg_inst);
        if (rpmsg_ept == NULL) {
                return NULL;
        }

        rpmsg_ept->cb = &cfg->cb;
        rpmsg_ept->priv = cfg->priv;
        rpmsg_ept->bound = false;
        rpmsg_ept->ep.priv = rpmsg_ept;

        strncpy(rpmsg_ept->name, cfg->name, sizeof(rpmsg_ept->name));

        err = ipc_rpmsg_register_ept(rpmsg_inst, RPMSG_REMOTE, rpmsg_ept);
        if (err != 0) {
                return NULL;
        }

        return rpmsg_ept;
}

static int register_ept(const struct device *instance, void **token,
                        const struct ipc_ept_cfg *cfg)
{
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_instance *rpmsg_inst;
        struct ipc_rpmsg_ept *rpmsg_ept;

        /* Instance is not ready */
        if (atomic_get(&data->state) != STATE_INITED) {
                return -EBUSY;
        }

        /* Empty name is not valid */
        if (cfg->name == NULL || cfg->name[0] == '\0') {
                return -EINVAL;
        }

        rpmsg_inst = &data->rpmsg_inst;

        rpmsg_ept = (data->role == ROLE_HOST) ?
                        register_ept_on_host(rpmsg_inst, cfg) :
                        register_ept_on_remote(rpmsg_inst, cfg);
        if (rpmsg_ept == NULL) {
                return -EINVAL;
        }

        (*token) = rpmsg_ept;

        return 0;
}

static int deregister_ept(const struct device *instance, void *token)
{
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_ept *rpmsg_ept;
        static struct k_work_sync sync;

        /* Instance is not ready */
        if (atomic_get(&data->state) != STATE_INITED) {
                return -EBUSY;
        }

        rpmsg_ept = (struct ipc_rpmsg_ept *) token;

        /* Endpoint is not registered with instance */
        if (!rpmsg_ept) {
                return -ENOENT;
        }

        /* Drain pending work items before tearing down the channel.
         *
         * Note: k_work_flush() faults on Cortex-M33 with "illegal use of
         * EPSR" if sync is not declared static.
         */
        k_work_flush(&data->mbox_work, &sync);

        rpmsg_destroy_ept(&rpmsg_ept->ep);

        memset(rpmsg_ept, 0, sizeof(struct ipc_rpmsg_ept));

        return 0;
}

static int send(const struct device *instance, void *token,
                const void *msg, size_t len)
{
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_ept *rpmsg_ept;
        int ret;

        /* Instance is not ready */
        if (atomic_get(&data->state) != STATE_INITED) {
                return -EBUSY;
        }

        /* Empty message is not allowed */
        if (len == 0) {
                return -EBADMSG;
        }

        rpmsg_ept = (struct ipc_rpmsg_ept *) token;

        /* Endpoint is not registered with instance */
        if (!rpmsg_ept) {
                return -ENOENT;
        }

        ret = rpmsg_send(&rpmsg_ept->ep, msg, len);

        /* No buffers available */
        if (ret == RPMSG_ERR_NO_BUFF) {
                return -ENOMEM;
        }

        return ret;
}

static int send_nocopy(const struct device *instance, void *token,
                       const void *msg, size_t len)
{
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_ept *rpmsg_ept;

        /* Instance is not ready */
        if (atomic_get(&data->state) != STATE_INITED) {
                return -EBUSY;
        }

        /* Empty message is not allowed */
        if (len == 0) {
                return -EBADMSG;
        }

        rpmsg_ept = (struct ipc_rpmsg_ept *) token;

        /* Endpoint is not registered with instance */
        if (!rpmsg_ept) {
                return -ENOENT;
        }

        return rpmsg_send_nocopy(&rpmsg_ept->ep, msg, len);
}

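/*
 * One-shot instance bring-up: carve the shared memory, initialize the
 * static VRINGs and the MBOX workqueue, then initialize RPMsg on top.
 * The READY -> BUSY -> INITED state transitions reject concurrent and
 * repeated open calls.
 */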
static int open(const struct device *instance)
{
        const struct backend_config_t *conf = instance->config;
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_instance *rpmsg_inst;
        struct rpmsg_device *rdev;
        int err;

        if (!atomic_cas(&data->state, STATE_READY, STATE_BUSY)) {
                return -EALREADY;
        }

        err = vr_shm_configure(&data->vr, conf);
        if (err != 0) {
                goto error;
        }

        data->vr.notify_cb = virtio_notify_cb;
        data->vr.priv = (void *) conf;

        err = ipc_static_vrings_init(&data->vr, conf->role);
        if (err != 0) {
                goto error;
        }

        err = mbox_init(instance);
        if (err != 0) {
                goto error;
        }

        rpmsg_inst = &data->rpmsg_inst;

        rpmsg_inst->bound_cb = bound_cb;
        rpmsg_inst->cb = ept_cb;

        err = ipc_rpmsg_init(rpmsg_inst, data->role, conf->buffer_size,
                             &data->vr.shm_io, &data->vr.vdev,
                             (void *)data->vr.shm_addr,
                             data->vr.shm_size, ns_bind_cb);
        if (err != 0) {
                goto error;
        }

        rdev = rpmsg_virtio_get_rpmsg_device(&rpmsg_inst->rvdev);

        data->tx_buffer_size = rpmsg_virtio_get_buffer_size(rdev);
        if (data->tx_buffer_size < 0) {
                err = -EINVAL;
                goto error;
        }

        atomic_set(&data->state, STATE_INITED);
        return 0;

error:
        /* Back to the ready state */
        atomic_set(&data->state, STATE_READY);
        return err;
}

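/*
 * Tear-down mirror of open(): refused while any endpoint is still bound,
 * otherwise RPMsg, the MBOX workqueue and the VRINGs are deinitialized
 * in reverse order of creation.
 */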
static int close(const struct device *instance)
{
        const struct backend_config_t *conf = instance->config;
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_instance *rpmsg_inst;
        int err;

        if (!atomic_cas(&data->state, STATE_INITED, STATE_BUSY)) {
                return -EALREADY;
        }

        rpmsg_inst = &data->rpmsg_inst;

        if (!check_endpoints_freed(rpmsg_inst)) {
                err = -EBUSY;
                goto error;
        }

        err = ipc_rpmsg_deinit(rpmsg_inst, data->role);
        if (err != 0) {
                goto error;
        }

        err = mbox_deinit(instance);
        if (err != 0) {
                goto error;
        }

        err = ipc_static_vrings_deinit(&data->vr, conf->role);
        if (err != 0) {
                goto error;
        }

        memset(&data->vr, 0, sizeof(struct ipc_static_vrings));
        memset(rpmsg_inst, 0, sizeof(struct ipc_rpmsg_instance));

        atomic_set(&data->state, STATE_READY);
        return 0;

error:
        /* Back to the inited state */
        atomic_set(&data->state, STATE_INITED);
        return err;
}

static int get_tx_buffer_size(const struct device *instance, void *token)
{
        struct backend_data_t *data = instance->data;

        return data->tx_buffer_size;
}

static int get_tx_buffer(const struct device *instance, void *token,
                         void **r_data, uint32_t *size, k_timeout_t wait)
{
        struct backend_data_t *data = instance->data;
        struct ipc_rpmsg_ept *rpmsg_ept;
        void *payload;

        rpmsg_ept = (struct ipc_rpmsg_ept *) token;

        /* Endpoint is not registered with instance */
        if (!rpmsg_ept) {
                return -ENOENT;
        }

        if (!r_data || !size) {
                return -EINVAL;
        }

        /* OpenAMP only supports a binary wait / no-wait */
        if (!K_TIMEOUT_EQ(wait, K_FOREVER) && !K_TIMEOUT_EQ(wait, K_NO_WAIT)) {
                return -ENOTSUP;
        }

        /* The user requested a specific size */
        if ((*size) && (*size > data->tx_buffer_size)) {
                /* Too big to fit */
                *size = data->tx_buffer_size;
                return -ENOMEM;
        }

        /*
         * OpenAMP doesn't really have the concept of forever: it gives up
         * after 15 seconds. In that case, just keep retrying.
         */
        do {
                payload = rpmsg_get_tx_payload_buffer(&rpmsg_ept->ep, size,
                                                      K_TIMEOUT_EQ(wait, K_FOREVER));
        } while ((!payload) && K_TIMEOUT_EQ(wait, K_FOREVER));

        /* This should really only be valid for K_NO_WAIT */
        if (!payload) {
                return -ENOBUFS;
        }

        (*r_data) = payload;

        return 0;
}

static int hold_rx_buffer(const struct device *instance, void *token,
                          void *data)
{
        struct ipc_rpmsg_ept *rpmsg_ept;

        rpmsg_ept = (struct ipc_rpmsg_ept *) token;

        /* Endpoint is not registered with instance */
        if (!rpmsg_ept) {
                return -ENOENT;
        }

        rpmsg_hold_rx_buffer(&rpmsg_ept->ep, data);

        return 0;
}

static int release_rx_buffer(const struct device *instance, void *token,
                             void *data)
{
        struct ipc_rpmsg_ept *rpmsg_ept;

        rpmsg_ept = (struct ipc_rpmsg_ept *) token;

        /* Endpoint is not registered with instance */
        if (!rpmsg_ept) {
                return -ENOENT;
        }

        rpmsg_release_rx_buffer(&rpmsg_ept->ep, data);

        return 0;
}

static int drop_tx_buffer(const struct device *instance, void *token,
                          const void *data)
{
        /* Not yet supported by OpenAMP */
        return -ENOTSUP;
}

static const struct ipc_service_backend backend_ops = {
        .open_instance = open,
        .close_instance = close,
        .register_endpoint = register_ept,
        .deregister_endpoint = deregister_ept,
        .send = send,
        .send_nocopy = send_nocopy,
        .drop_tx_buffer = drop_tx_buffer,
        .get_tx_buffer = get_tx_buffer,
        .get_tx_buffer_size = get_tx_buffer_size,
        .hold_rx_buffer = hold_rx_buffer,
        .release_rx_buffer = release_rx_buffer,
};

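/*
 * These ops are never called directly: applications go through the
 * ipc_service API. A minimal sketch, assuming an instance with the
 * devicetree label ipc0 and a hypothetical endpoint named "ep0":
 *
 *   const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(ipc0));
 *   struct ipc_ept ept;
 *   struct ipc_ept_cfg cfg = { .name = "ep0", .cb = { ... } };
 *
 *   ipc_service_open_instance(dev);
 *   ipc_service_register_endpoint(dev, &ept, &cfg);
 *   ipc_service_send(&ept, msg, len);
 */

/*
 * Device init hook: only cheap, always-safe setup happens here; the
 * expensive bring-up is deferred to open_instance().
 */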
static int backend_init(const struct device *instance)
{
        const struct backend_config_t *conf = instance->config;
        struct backend_data_t *data = instance->data;

        data->role = conf->role;

#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
        __ASSERT((VDEV_STATUS_SIZE % sys_cache_data_line_size_get()) == 0U,
                 "VDEV status area must be aligned to the cache line");
        __ASSERT((MEM_ALIGNMENT % sys_cache_data_line_size_get()) == 0U,
                 "Static VRINGs must be aligned to the cache line");
        __ASSERT((conf->buffer_size % sys_cache_data_line_size_get()) == 0U,
                 "Buffers must be aligned to the cache line");
#endif

        k_mutex_init(&data->rpmsg_inst.mtx);
        atomic_set(&data->state, STATE_READY);

        return 0;
}

#if defined(CONFIG_ARCH_POSIX)
#define BACKEND_PRE(i)      extern char IPC##i##_shm_buffer[];
#define BACKEND_SHM_ADDR(i) (const uintptr_t)IPC##i##_shm_buffer
#else
#define BACKEND_PRE(i)
#define BACKEND_SHM_ADDR(i) DT_REG_ADDR(DT_INST_PHANDLE(i, memory_region))
#endif /* defined(CONFIG_ARCH_POSIX) */

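/*
 * One config/data/device triplet is instantiated per enabled devicetree
 * instance. An illustrative (not authoritative) node for this binding:
 *
 *   ipc0: ipc {
 *           compatible = "zephyr,ipc-openamp-static-vrings";
 *           memory-region = <&ipc_shm>;
 *           mboxes = <&mbox 0>, <&mbox 1>;
 *           mbox-names = "tx", "rx";
 *           role = "host";
 *           status = "okay";
 *   };
 */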
#define DEFINE_BACKEND_DEVICE(i)                                                  \
        BACKEND_PRE(i)                                                            \
        static struct backend_config_t backend_config_##i = {                     \
                .role = DT_ENUM_IDX_OR(DT_DRV_INST(i), role, ROLE_HOST),          \
                .shm_size = DT_REG_SIZE(DT_INST_PHANDLE(i, memory_region)),       \
                .shm_addr = BACKEND_SHM_ADDR(i),                                  \
                .mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx),                          \
                .mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx),                          \
                .wq_prio = COND_CODE_1(DT_INST_NODE_HAS_PROP(i, zephyr_priority), \
                           (DT_INST_PROP_BY_IDX(i, zephyr_priority, 0)),          \
                           (0)),                                                  \
                .wq_prio_type = COND_CODE_1(DT_INST_NODE_HAS_PROP(i, zephyr_priority), \
                                (DT_INST_PROP_BY_IDX(i, zephyr_priority, 1)),     \
                                (PRIO_PREEMPT)),                                  \
                .buffer_size = DT_INST_PROP_OR(i, zephyr_buffer_size,             \
                                               RPMSG_BUFFER_SIZE),                \
                .id = i,                                                          \
        };                                                                        \
                                                                                  \
        static struct backend_data_t backend_data_##i;                            \
                                                                                  \
        DEVICE_DT_INST_DEFINE(i,                                                  \
                              &backend_init,                                      \
                              NULL,                                               \
                              &backend_data_##i,                                  \
                              &backend_config_##i,                                \
                              POST_KERNEL,                                        \
                              CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY,            \
                              &backend_ops);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)

#define BACKEND_CONFIG_INIT(n) &backend_config_##n,

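/*
 * Optionally zero the VDEV status area of every HOST instance before the
 * kernel starts, so the peer cannot observe stale state left over from a
 * previous run.
 */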
#if defined(CONFIG_IPC_SERVICE_BACKEND_RPMSG_SHMEM_RESET)
static int shared_memory_prepare(void)
{
        static const struct backend_config_t *config[] = {
                DT_INST_FOREACH_STATUS_OKAY(BACKEND_CONFIG_INIT)
        };

        for (int i = 0; i < DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT); i++) {
                if (config[i]->role == ROLE_HOST) {
                        memset((void *) config[i]->shm_addr, 0, VDEV_STATUS_SIZE);
                }
        }

        return 0;
}

SYS_INIT(shared_memory_prepare, PRE_KERNEL_1, 1);
#endif /* CONFIG_IPC_SERVICE_BACKEND_RPMSG_SHMEM_RESET */