/*
 * Copyright (c) 2021 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/logging/log.h>
#include <zephyr/kernel.h>
#include <zephyr/cache.h>
#include <zephyr/device.h>
#include <zephyr/init.h>
#include <zephyr/sys/atomic.h>

#include <zephyr/ipc/ipc_service_backend.h>
#include <zephyr/ipc/ipc_static_vrings.h>
#include <zephyr/ipc/ipc_rpmsg.h>

#include <zephyr/drivers/mbox.h>
#include <zephyr/dt-bindings/ipc_service/static_vrings.h>

#include "ipc_rpmsg_static_vrings.h"

#define DT_DRV_COMPAT	zephyr_ipc_openamp_static_vrings

#define NUM_INSTANCES	DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT)

#define WQ_STACK_SIZE	CONFIG_IPC_SERVICE_BACKEND_RPMSG_WQ_STACK_SIZE

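/*
 * Instance state machine (backend_data_t.state):
 *
 *  STATE_READY  - instance can be opened
 *  STATE_BUSY   - an open() or close() transition is in progress
 *  STATE_INITED - instance is fully initialized and usable
 */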
#define STATE_READY	(0)
#define STATE_BUSY	(1)
#define STATE_INITED	(2)

#if defined(CONFIG_THREAD_MAX_NAME_LEN)
#define THREAD_MAX_NAME_LEN CONFIG_THREAD_MAX_NAME_LEN
#else
#define THREAD_MAX_NAME_LEN 1
#endif

K_THREAD_STACK_ARRAY_DEFINE(mbox_stack, NUM_INSTANCES, WQ_STACK_SIZE);

struct backend_data_t {
	/* RPMsg */
	struct ipc_rpmsg_instance rpmsg_inst;

	/* Static VRINGs */
	struct ipc_static_vrings vr;

	/* MBOX WQ */
	struct k_work mbox_work;
	struct k_work_q mbox_wq;

	/* General */
	unsigned int role;
	atomic_t state;

	/* TX buffer size */
	int tx_buffer_size;
};

struct backend_config_t {
	unsigned int role;
	uintptr_t shm_addr;
	size_t shm_size;
	struct mbox_dt_spec mbox_tx;
	struct mbox_dt_spec mbox_rx;
	unsigned int wq_prio_type;
	unsigned int wq_prio;
	unsigned int id;
	unsigned int buffer_size;
};

static void rpmsg_service_unbind(struct rpmsg_endpoint *ep)
{
	rpmsg_destroy_ept(ep);
}

static struct ipc_rpmsg_ept *get_ept_slot_with_name(struct ipc_rpmsg_instance *rpmsg_inst,
						    const char *name)
{
	struct ipc_rpmsg_ept *rpmsg_ept;

	for (size_t i = 0; i < NUM_ENDPOINTS; i++) {
		rpmsg_ept = &rpmsg_inst->endpoint[i];

		if (strcmp(name, rpmsg_ept->name) == 0) {
			return &rpmsg_inst->endpoint[i];
		}
	}

	return NULL;
}

static struct ipc_rpmsg_ept *get_available_ept_slot(struct ipc_rpmsg_instance *rpmsg_inst)
{
	return get_ept_slot_with_name(rpmsg_inst, "");
}

static bool check_endpoints_freed(struct ipc_rpmsg_instance *rpmsg_inst)
{
	struct ipc_rpmsg_ept *rpmsg_ept;

	for (size_t i = 0; i < NUM_ENDPOINTS; i++) {
		rpmsg_ept = &rpmsg_inst->endpoint[i];

		if (rpmsg_ept->bound == true) {
			return false;
		}
	}

	return true;
}

/*
 * Returns:
 *  - true:  when the endpoint was already cached / registered
 *  - false: when the endpoint was never registered before
 *
 * Returns in **rpmsg_ept:
 *  - The endpoint with the name *name if it exists
 *  - The first endpoint slot available when the endpoint with name *name does
 *    not exist
 *  - NULL in case of error
 */
static bool get_ept(struct ipc_rpmsg_instance *rpmsg_inst,
		    struct ipc_rpmsg_ept **rpmsg_ept, const char *name)
{
	struct ipc_rpmsg_ept *ept;

	ept = get_ept_slot_with_name(rpmsg_inst, name);
	if (ept != NULL) {
		(*rpmsg_ept) = ept;
		return true;
	}

	ept = get_available_ept_slot(rpmsg_inst);
	if (ept != NULL) {
		(*rpmsg_ept) = ept;
		return false;
	}

	(*rpmsg_ept) = NULL;

	return false;
}

static void advertise_ept(struct ipc_rpmsg_instance *rpmsg_inst, struct ipc_rpmsg_ept *rpmsg_ept,
			  const char *name, uint32_t dest)
{
	struct rpmsg_device *rdev;
	int err;

	rdev = rpmsg_virtio_get_rpmsg_device(&rpmsg_inst->rvdev);

	err = rpmsg_create_ept(&rpmsg_ept->ep, rdev, name, RPMSG_ADDR_ANY,
			       dest, rpmsg_inst->cb, rpmsg_service_unbind);
	if (err != 0) {
		return;
	}

	rpmsg_ept->bound = true;
	if (rpmsg_inst->bound_cb) {
		rpmsg_inst->bound_cb(rpmsg_ept);
	}
}

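/*
 * Name Service (NS) bind callback, invoked when the other core announces an
 * endpoint. Depending on whether a matching endpoint has already been
 * registered locally, the endpoint is either advertised right away or cached
 * for a later register_ept() call (see the HOST / REMOTE comments below).
 */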
static void ns_bind_cb(struct rpmsg_device *rdev, const char *name, uint32_t dest)
{
	struct ipc_rpmsg_instance *rpmsg_inst;
	struct rpmsg_virtio_device *p_rvdev;
	struct ipc_rpmsg_ept *rpmsg_ept;
	bool ept_cached;

	p_rvdev = CONTAINER_OF(rdev, struct rpmsg_virtio_device, rdev);
	rpmsg_inst = CONTAINER_OF(p_rvdev->shpool, struct ipc_rpmsg_instance, shm_pool);

	if (name == NULL || name[0] == '\0') {
		return;
	}

	k_mutex_lock(&rpmsg_inst->mtx, K_FOREVER);
	ept_cached = get_ept(rpmsg_inst, &rpmsg_ept, name);

	if (rpmsg_ept == NULL) {
		k_mutex_unlock(&rpmsg_inst->mtx);
		return;
	}

	if (ept_cached) {
		/*
		 * The endpoint was already registered by the HOST core. The
		 * endpoint can now be advertised to the REMOTE core.
		 */
		k_mutex_unlock(&rpmsg_inst->mtx);
		advertise_ept(rpmsg_inst, rpmsg_ept, name, dest);
	} else {
		/*
		 * The endpoint is not registered yet; this happens when the
		 * REMOTE core registers the endpoint before the HOST has had
		 * a chance to register it. Cache it, saving the name and
		 * destination address to be used by the next register_ept()
		 * call from the HOST core.
		 */
		strncpy(rpmsg_ept->name, name, sizeof(rpmsg_ept->name));
		rpmsg_ept->name[RPMSG_NAME_SIZE - 1] = '\0';
		rpmsg_ept->dest = dest;
		k_mutex_unlock(&rpmsg_inst->mtx);
	}
}

static void bound_cb(struct ipc_rpmsg_ept *ept)
{
	rpmsg_send(&ept->ep, (uint8_t *)"", 0);

	if (ept->cb->bound) {
		ept->cb->bound(ept->priv);
	}
}

static int ept_cb(struct rpmsg_endpoint *ep, void *data, size_t len, uint32_t src, void *priv)
{
	struct ipc_rpmsg_ept *ept;

	ept = (struct ipc_rpmsg_ept *) priv;

	/*
	 * The remote processor has sent an NS announcement; an empty message
	 * is used to inform the remote side that a local endpoint has been
	 * created and that this processor is ready to communicate with that
	 * endpoint.
	 *
	 * ipc_rpmsg_register_ept
	 *  rpmsg_send_ns_message --------------> ns_bind_cb
	 *                                        bound_cb
	 *                ept_cb <--------------- rpmsg_send [empty message]
	 *              bound_cb
	 */
	if (len == 0) {
		if (!ept->bound) {
			ept->bound = true;
			bound_cb(ept);
		}
		return RPMSG_SUCCESS;
	}

	if (ept->cb->received) {
		ept->cb->received(data, len, ept->priv);
	}

	return RPMSG_SUCCESS;
}

static int vr_shm_configure(struct ipc_static_vrings *vr, const struct backend_config_t *conf)
{
	unsigned int num_desc;

	num_desc = optimal_num_desc(conf->shm_size, conf->buffer_size);
	if (num_desc == 0) {
		return -ENOMEM;
	}

	/*
	 * conf->shm_addr  +--------------+  vr->status_reg_addr
	 *		   |    STATUS    |
	 *		   +--------------+  vr->shm_addr
	 *		   |              |
	 *		   |              |
	 *		   |   RX BUFS    |
	 *		   |              |
	 *		   |              |
	 *		   +--------------+
	 *		   |              |
	 *		   |              |
	 *		   |   TX BUFS    |
	 *		   |              |
	 *		   |              |
	 *		   +--------------+  vr->rx_addr (aligned)
	 *		   |   RX VRING   |
	 *		   +--------------+  vr->tx_addr (aligned)
	 *		   |   TX VRING   |
	 *		   +--------------+
	 */

	vr->shm_addr = ROUND_UP(conf->shm_addr + VDEV_STATUS_SIZE, MEM_ALIGNMENT);
	vr->shm_size = shm_size(num_desc, conf->buffer_size);

	vr->rx_addr = vr->shm_addr + VRING_COUNT * vq_ring_size(num_desc, conf->buffer_size);
	vr->tx_addr = ROUND_UP(vr->rx_addr + vring_size(num_desc, MEM_ALIGNMENT),
			       MEM_ALIGNMENT);

	vr->status_reg_addr = conf->shm_addr;

	vr->vring_size = num_desc;

	return 0;
}

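/* Virtqueue notification hook: signal the other core over the TX MBOX channel. */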
static void virtio_notify_cb(struct virtqueue *vq, void *priv)
{
	struct backend_config_t *conf = priv;

	if (conf->mbox_tx.dev) {
		mbox_send_dt(&conf->mbox_tx, NULL);
	}
}

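/*
 * MBOX RX path: mbox_callback() only submits mbox_work to the per-instance
 * work queue; mbox_callback_process() then runs in thread context and
 * processes the notification for the role-specific virtqueue.
 */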
static void mbox_callback_process(struct k_work *item)
{
	struct backend_data_t *data;
	unsigned int vq_id;

	data = CONTAINER_OF(item, struct backend_data_t, mbox_work);
	vq_id = (data->role == ROLE_HOST) ? VIRTQUEUE_ID_HOST : VIRTQUEUE_ID_REMOTE;

	virtqueue_notification(data->vr.vq[vq_id]);
}

static void mbox_callback(const struct device *instance, uint32_t channel,
			  void *user_data, struct mbox_msg *msg_data)
{
	struct backend_data_t *data = user_data;

	k_work_submit_to_queue(&data->mbox_wq, &data->mbox_work);
}

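/*
 * Set up the per-instance MBOX work queue (stack size from Kconfig, priority
 * from the devicetree zephyr,priority property) and register and enable the
 * RX channel callback.
 */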
static int mbox_init(const struct device *instance)
{
	const struct backend_config_t *conf = instance->config;
	struct backend_data_t *data = instance->data;
	struct k_work_queue_config wq_cfg = {.name = instance->name};
	int prio, err;

	prio = (conf->wq_prio_type == PRIO_COOP) ? K_PRIO_COOP(conf->wq_prio) :
						   K_PRIO_PREEMPT(conf->wq_prio);

	k_work_queue_init(&data->mbox_wq);
	k_work_queue_start(&data->mbox_wq, mbox_stack[conf->id], WQ_STACK_SIZE, prio, &wq_cfg);

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		char name[THREAD_MAX_NAME_LEN];

		snprintk(name, sizeof(name), "mbox_wq #%d", conf->id);
		k_thread_name_set(&data->mbox_wq.thread, name);
	}

	k_work_init(&data->mbox_work, mbox_callback_process);

	err = mbox_register_callback_dt(&conf->mbox_rx, mbox_callback, data);
	if (err != 0) {
		return err;
	}

	return mbox_set_enabled_dt(&conf->mbox_rx, 1);
}

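/* Disable the RX channel, drain the MBOX work queue and stop its thread. */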
static int mbox_deinit(const struct device *instance)
{
	const struct backend_config_t *conf = instance->config;
	struct backend_data_t *data = instance->data;
	k_tid_t wq_thread;
	int err;

	err = mbox_set_enabled_dt(&conf->mbox_rx, 0);
	if (err != 0) {
		return err;
	}

	k_work_queue_drain(&data->mbox_wq, 1);

	wq_thread = k_work_queue_thread_get(&data->mbox_wq);
	k_thread_abort(wq_thread);

	return 0;
}

static struct ipc_rpmsg_ept *register_ept_on_host(struct ipc_rpmsg_instance *rpmsg_inst,
						  const struct ipc_ept_cfg *cfg)
{
	struct ipc_rpmsg_ept *rpmsg_ept;
	bool ept_cached;

	k_mutex_lock(&rpmsg_inst->mtx, K_FOREVER);

	ept_cached = get_ept(rpmsg_inst, &rpmsg_ept, cfg->name);
	if (rpmsg_ept == NULL) {
		k_mutex_unlock(&rpmsg_inst->mtx);
		return NULL;
	}

	rpmsg_ept->cb = &cfg->cb;
	rpmsg_ept->priv = cfg->priv;
	rpmsg_ept->bound = false;
	rpmsg_ept->ep.priv = rpmsg_ept;

	if (ept_cached) {
		/*
		 * The endpoint was cached in the NS bind callback. We can finally
		 * advertise it.
		 */
		k_mutex_unlock(&rpmsg_inst->mtx);
		advertise_ept(rpmsg_inst, rpmsg_ept, cfg->name, rpmsg_ept->dest);
	} else {
		/*
		 * There is no endpoint in the cache because the REMOTE has
		 * not registered the endpoint yet. Cache it.
		 */
		strncpy(rpmsg_ept->name, cfg->name, sizeof(rpmsg_ept->name));
		rpmsg_ept->name[RPMSG_NAME_SIZE - 1] = '\0';
		k_mutex_unlock(&rpmsg_inst->mtx);
	}

	return rpmsg_ept;
}

static struct ipc_rpmsg_ept *register_ept_on_remote(struct ipc_rpmsg_instance *rpmsg_inst,
						    const struct ipc_ept_cfg *cfg)
{
	struct ipc_rpmsg_ept *rpmsg_ept;
	int err;

	rpmsg_ept = get_available_ept_slot(rpmsg_inst);
	if (rpmsg_ept == NULL) {
		return NULL;
	}

	rpmsg_ept->cb = &cfg->cb;
	rpmsg_ept->priv = cfg->priv;
	rpmsg_ept->bound = false;
	rpmsg_ept->ep.priv = rpmsg_ept;

	strncpy(rpmsg_ept->name, cfg->name, sizeof(rpmsg_ept->name));
	rpmsg_ept->name[RPMSG_NAME_SIZE - 1] = '\0';

	err = ipc_rpmsg_register_ept(rpmsg_inst, RPMSG_REMOTE, rpmsg_ept);
	if (err != 0) {
		return NULL;
	}

	return rpmsg_ept;
}

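/*
 * Backend endpoint registration entry point: dispatches to the HOST or REMOTE
 * helper depending on the role of this core and returns the endpoint slot as
 * the opaque endpoint token.
 */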
static int register_ept(const struct device *instance, void **token,
			const struct ipc_ept_cfg *cfg)
{
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_instance *rpmsg_inst;
	struct ipc_rpmsg_ept *rpmsg_ept;

	/* Instance is not ready */
	if (atomic_get(&data->state) != STATE_INITED) {
		return -EBUSY;
	}

	/* Empty name is not valid */
	if (cfg->name == NULL || cfg->name[0] == '\0') {
		return -EINVAL;
	}

	rpmsg_inst = &data->rpmsg_inst;

	rpmsg_ept = (data->role == ROLE_HOST) ?
			register_ept_on_host(rpmsg_inst, cfg) :
			register_ept_on_remote(rpmsg_inst, cfg);
	if (rpmsg_ept == NULL) {
		return -EINVAL;
	}

	(*token) = rpmsg_ept;

	return 0;
}

static int deregister_ept(const struct device *instance, void *token)
{
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_ept *rpmsg_ept;
	static struct k_work_sync sync;

	/* Instance is not ready */
	if (atomic_get(&data->state) != STATE_INITED) {
		return -EBUSY;
	}

	rpmsg_ept = (struct ipc_rpmsg_ept *) token;

	/* Endpoint is not registered with instance */
	if (!rpmsg_ept) {
		return -ENOENT;
	}

	/* Drain pending work items before tearing down the channel.
	 *
	 * Note: k_work_flush() faults on Cortex-M33 with "illegal use of EPSR"
	 * if `sync` is not declared static.
	 */
	k_work_flush(&data->mbox_work, &sync);

	rpmsg_destroy_ept(&rpmsg_ept->ep);

	memset(rpmsg_ept, 0, sizeof(struct ipc_rpmsg_ept));

	return 0;
}

static int send(const struct device *instance, void *token,
		const void *msg, size_t len)
{
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_ept *rpmsg_ept;
	int ret;

	/* Instance is not ready */
	if (atomic_get(&data->state) != STATE_INITED) {
		return -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -EBADMSG;
	}

	rpmsg_ept = (struct ipc_rpmsg_ept *) token;

	/* Endpoint is not registered with instance */
	if (!rpmsg_ept) {
		return -ENOENT;
	}

	ret = rpmsg_send(&rpmsg_ept->ep, msg, len);

	/* No buffers available */
	if (ret == RPMSG_ERR_NO_BUFF) {
		return -ENOMEM;
	}

	return ret;
}

static int send_nocopy(const struct device *instance, void *token,
		       const void *msg, size_t len)
{
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_ept *rpmsg_ept;

	/* Instance is not ready */
	if (atomic_get(&data->state) != STATE_INITED) {
		return -EBUSY;
	}

	/* Empty message is not allowed */
	if (len == 0) {
		return -EBADMSG;
	}

	rpmsg_ept = (struct ipc_rpmsg_ept *) token;

	/* Endpoint is not registered with instance */
	if (!rpmsg_ept) {
		return -ENOENT;
	}

	return rpmsg_send_nocopy(&rpmsg_ept->ep, msg, len);
}

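/*
 * Bring the instance up: lay out the shared memory, initialize the static
 * VRINGs and the MBOX, initialize RPMsg and cache the negotiated TX buffer
 * size.
 */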
static int open(const struct device *instance)
{
	const struct backend_config_t *conf = instance->config;
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_instance *rpmsg_inst;
	struct rpmsg_device *rdev;
	int err;

	if (!atomic_cas(&data->state, STATE_READY, STATE_BUSY)) {
		return -EALREADY;
	}

	err = vr_shm_configure(&data->vr, conf);
	if (err != 0) {
		goto error;
	}

	data->vr.notify_cb = virtio_notify_cb;
	data->vr.priv = (void *) conf;

	err = ipc_static_vrings_init(&data->vr, conf->role);
	if (err != 0) {
		goto error;
	}

	err = mbox_init(instance);
	if (err != 0) {
		goto error;
	}

	rpmsg_inst = &data->rpmsg_inst;

	rpmsg_inst->bound_cb = bound_cb;
	rpmsg_inst->cb = ept_cb;

	err = ipc_rpmsg_init(rpmsg_inst, data->role, conf->buffer_size,
			     &data->vr.shm_io, &data->vr.vdev,
			     (void *)data->vr.shm_addr,
			     data->vr.shm_size, ns_bind_cb);
	if (err != 0) {
		goto error;
	}

	rdev = rpmsg_virtio_get_rpmsg_device(&rpmsg_inst->rvdev);

	data->tx_buffer_size = rpmsg_virtio_get_buffer_size(rdev);
	if (data->tx_buffer_size < 0) {
		err = -EINVAL;
		goto error;
	}

	atomic_set(&data->state, STATE_INITED);
	return 0;

error:
	/* Back to the ready state */
	atomic_set(&data->state, STATE_READY);
	return err;
}

static int close(const struct device *instance)
{
	const struct backend_config_t *conf = instance->config;
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_instance *rpmsg_inst;
	int err;

	if (!atomic_cas(&data->state, STATE_INITED, STATE_BUSY)) {
		return -EALREADY;
	}

	rpmsg_inst = &data->rpmsg_inst;

	/* Refuse to close while endpoints are still bound; go through the
	 * error path so the instance returns to STATE_INITED instead of
	 * being left in STATE_BUSY.
	 */
	if (!check_endpoints_freed(rpmsg_inst)) {
		err = -EBUSY;
		goto error;
	}

	err = ipc_rpmsg_deinit(rpmsg_inst, data->role);
	if (err != 0) {
		goto error;
	}

	err = mbox_deinit(instance);
	if (err != 0) {
		goto error;
	}

	err = ipc_static_vrings_deinit(&data->vr, conf->role);
	if (err != 0) {
		goto error;
	}

	memset(&data->vr, 0, sizeof(struct ipc_static_vrings));
	memset(rpmsg_inst, 0, sizeof(struct ipc_rpmsg_instance));

	atomic_set(&data->state, STATE_READY);
	return 0;

error:
	/* Back to the inited state */
	atomic_set(&data->state, STATE_INITED);
	return err;
}

static int get_tx_buffer_size(const struct device *instance, void *token)
{
	struct backend_data_t *data = instance->data;

	return data->tx_buffer_size;
}

static int get_tx_buffer(const struct device *instance, void *token,
			 void **r_data, uint32_t *size, k_timeout_t wait)
{
	struct backend_data_t *data = instance->data;
	struct ipc_rpmsg_ept *rpmsg_ept;
	void *payload;

	rpmsg_ept = (struct ipc_rpmsg_ept *) token;

	/* Endpoint is not registered with instance */
	if (!rpmsg_ept) {
		return -ENOENT;
	}

	if (!r_data || !size) {
		return -EINVAL;
	}

	/* OpenAMP only supports a binary wait / no-wait */
	if (!K_TIMEOUT_EQ(wait, K_FOREVER) && !K_TIMEOUT_EQ(wait, K_NO_WAIT)) {
		return -ENOTSUP;
	}

	/* The user requested a specific size */
	if ((*size) && (*size > data->tx_buffer_size)) {
		/* Too big to fit */
		*size = data->tx_buffer_size;
		return -ENOMEM;
	}

	/*
	 * OpenAMP has no real notion of waiting forever: it gives up after 15
	 * seconds. For K_FOREVER, keep retrying until a buffer is available.
	 */
	do {
		payload = rpmsg_get_tx_payload_buffer(&rpmsg_ept->ep, size,
						      K_TIMEOUT_EQ(wait, K_FOREVER));
	} while ((!payload) && K_TIMEOUT_EQ(wait, K_FOREVER));

	/* This should only be possible for K_NO_WAIT */
	if (!payload) {
		return -ENOBUFS;
	}

	(*r_data) = payload;

	return 0;
}

static int hold_rx_buffer(const struct device *instance, void *token,
			  void *data)
{
	struct ipc_rpmsg_ept *rpmsg_ept;

	rpmsg_ept = (struct ipc_rpmsg_ept *) token;

	/* Endpoint is not registered with instance */
	if (!rpmsg_ept) {
		return -ENOENT;
	}

	rpmsg_hold_rx_buffer(&rpmsg_ept->ep, data);

	return 0;
}

static int release_rx_buffer(const struct device *instance, void *token,
			     void *data)
{
	struct ipc_rpmsg_ept *rpmsg_ept;

	rpmsg_ept = (struct ipc_rpmsg_ept *) token;

	/* Endpoint is not registered with instance */
	if (!rpmsg_ept) {
		return -ENOENT;
	}

	rpmsg_release_rx_buffer(&rpmsg_ept->ep, data);

	return 0;
}

static int drop_tx_buffer(const struct device *instance, void *token,
			  const void *data)
{
	/* Not yet supported by OpenAMP */
	return -ENOTSUP;
}

static const struct ipc_service_backend backend_ops = {
	.open_instance = open,
	.close_instance = close,
	.register_endpoint = register_ept,
	.deregister_endpoint = deregister_ept,
	.send = send,
	.send_nocopy = send_nocopy,
	.drop_tx_buffer = drop_tx_buffer,
	.get_tx_buffer = get_tx_buffer,
	.get_tx_buffer_size = get_tx_buffer_size,
	.hold_rx_buffer = hold_rx_buffer,
	.release_rx_buffer = release_rx_buffer,
};

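/*
 * Early per-instance init: check cache-line alignment constraints and mark
 * the instance as ready to be opened.
 */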
static int backend_init(const struct device *instance)
{
	const struct backend_config_t *conf = instance->config;
	struct backend_data_t *data = instance->data;

	data->role = conf->role;

#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_DCACHE)
	__ASSERT((VDEV_STATUS_SIZE % sys_cache_data_line_size_get()) == 0U,
		  "VDEV status area must be aligned to the cache line");
	__ASSERT((MEM_ALIGNMENT % sys_cache_data_line_size_get()) == 0U,
		  "Static VRINGs must be aligned to the cache line");
	__ASSERT((conf->buffer_size % sys_cache_data_line_size_get()) == 0U,
		  "Buffers must be aligned to the cache line");
#endif

	k_mutex_init(&data->rpmsg_inst.mtx);
	atomic_set(&data->state, STATE_READY);

	return 0;
}

#if defined(CONFIG_ARCH_POSIX)
#define BACKEND_PRE(i) extern char IPC##i##_shm_buffer[];
#define BACKEND_SHM_ADDR(i) (const uintptr_t)IPC##i##_shm_buffer
#else
#define BACKEND_PRE(i)
#define BACKEND_SHM_ADDR(i) DT_REG_ADDR(DT_INST_PHANDLE(i, memory_region))
#endif /* defined(CONFIG_ARCH_POSIX) */

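/*
 * Instantiate one backend configuration, data block and device per enabled
 * devicetree instance of the compatible.
 */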
#define DEFINE_BACKEND_DEVICE(i)							\
	BACKEND_PRE(i)									\
	static struct backend_config_t backend_config_##i = {				\
		.role = DT_ENUM_IDX_OR(DT_DRV_INST(i), role, ROLE_HOST),		\
		.shm_size = DT_REG_SIZE(DT_INST_PHANDLE(i, memory_region)),		\
		.shm_addr = BACKEND_SHM_ADDR(i),					\
		.mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx),				\
		.mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx),				\
		.wq_prio = COND_CODE_1(DT_INST_NODE_HAS_PROP(i, zephyr_priority),	\
			   (DT_INST_PROP_BY_IDX(i, zephyr_priority, 0)),		\
			   (0)),							\
		.wq_prio_type = COND_CODE_1(DT_INST_NODE_HAS_PROP(i, zephyr_priority),	\
			   (DT_INST_PROP_BY_IDX(i, zephyr_priority, 1)),		\
			   (PRIO_PREEMPT)),						\
		.buffer_size = DT_INST_PROP_OR(i, zephyr_buffer_size,			\
					       RPMSG_BUFFER_SIZE),			\
		.id = i,								\
	};										\
											\
	static struct backend_data_t backend_data_##i;					\
											\
	DEVICE_DT_INST_DEFINE(i,							\
			 &backend_init,							\
			 NULL,								\
			 &backend_data_##i,						\
			 &backend_config_##i,						\
			 POST_KERNEL,							\
			 CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY,			\
			 &backend_ops);

DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)

#define BACKEND_CONFIG_INIT(n) &backend_config_##n,

#if defined(CONFIG_IPC_SERVICE_BACKEND_RPMSG_SHMEM_RESET)
static int shared_memory_prepare(void)
{
	static const struct backend_config_t *config[] = {
		DT_INST_FOREACH_STATUS_OKAY(BACKEND_CONFIG_INIT)
	};

	for (int i = 0; i < DT_NUM_INST_STATUS_OKAY(DT_DRV_COMPAT); i++) {
		if (config[i]->role == ROLE_HOST) {
			memset((void *) config[i]->shm_addr, 0, VDEV_STATUS_SIZE);
		}
	}

	return 0;
}

SYS_INIT(shared_memory_prepare, PRE_KERNEL_1, 1);
#endif /* CONFIG_IPC_SERVICE_BACKEND_RPMSG_SHMEM_RESET */