/*
 * Copyright (c) 2014, Mentor Graphics Corporation
 * All rights reserved.
 * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright (c) 2018 Linaro, Inc. All rights reserved.
 * Copyright (c) 2021 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <metal/alloc.h>
#include <metal/sleep.h>
#include <metal/sys.h>
#include <metal/utilities.h>
#include <openamp/rpmsg_virtio.h>
#include <openamp/virtqueue.h>

#include "rpmsg_internal.h"

#define RPMSG_NUM_VRINGS                        2

/* Total tick count for 15 seconds, with a 1 usec tick. */
#define RPMSG_TICK_COUNT                        15000000

/* Time to wait between buffer polls, in usec (1 msec). */
#define RPMSG_TICKS_PER_INTERVAL                1000

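/*
 * Worked example: the Tx wait loop below sleeps RPMSG_TICKS_PER_INTERVAL
 * (1000 usec) per retry and retries RPMSG_TICK_COUNT /
 * RPMSG_TICKS_PER_INTERVAL = 15000000 / 1000 = 15000 times, i.e. roughly
 * 15 seconds before giving up on getting a free buffer.
 */
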
/*
 * Get the buffer held counter value.
 * If 0 the buffer can be released
 */
#define RPMSG_BUF_HELD_COUNTER(rp_hdr)          \
	(((rp_hdr)->reserved & RPMSG_BUF_HELD_MASK) >> RPMSG_BUF_HELD_SHIFT)

/* Increase buffer held counter */
#define RPMSG_BUF_HELD_INC(rp_hdr)              \
	((rp_hdr)->reserved += 1 << RPMSG_BUF_HELD_SHIFT)

/* Decrease buffer held counter */
#define RPMSG_BUF_HELD_DEC(rp_hdr)              \
	((rp_hdr)->reserved -= 1 << RPMSG_BUF_HELD_SHIFT)

/* Get the buffer index */
#define RPMSG_BUF_INDEX(rp_hdr)                 \
	((uint16_t)((rp_hdr)->reserved & ~RPMSG_BUF_HELD_MASK))

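/*
 * Illustrative sketch (not part of the driver): assuming the
 * RPMSG_BUF_HELD_SHIFT/RPMSG_BUF_HELD_MASK definitions in rpmsg_internal.h
 * pack the held counter in the upper bits of rp_hdr->reserved and the
 * buffer index in the lower bits, the macros above behave as follows:
 *
 *	struct rpmsg_hdr hdr = { .reserved = 5 };  // index 5, counter 0
 *	RPMSG_BUF_HELD_INC(&hdr);                  // counter -> 1
 *	RPMSG_BUF_HELD_INC(&hdr);                  // counter -> 2
 *	// RPMSG_BUF_HELD_COUNTER(&hdr) == 2
 *	// RPMSG_BUF_INDEX(&hdr) == 5
 */
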
/**
 * struct vbuff_reclaimer_t - vring buffer recycler
 *
 * This structure is used by the rpmsg virtio to store unused virtio buffers,
 * as the virtqueue structure has already been updated and the memory
 * allocated.
 *
 * @param node	node in reclaimer list.
 * @param idx	virtio descriptor index containing the buffer information.
 */
struct vbuff_reclaimer_t {
	struct metal_list node;
	uint16_t idx;
};

/* Default configuration */
#if VIRTIO_ENABLED(VIRTIO_DRIVER_SUPPORT)
#define RPMSG_VIRTIO_DEFAULT_CONFIG                \
	(&(const struct rpmsg_virtio_config) {     \
		.h2r_buf_size = RPMSG_BUFFER_SIZE, \
		.r2h_buf_size = RPMSG_BUFFER_SIZE, \
		.split_shpool = false,             \
	})
#else
#define RPMSG_VIRTIO_DEFAULT_CONFIG          NULL
#endif

#if VIRTIO_ENABLED(VIRTIO_DRIVER_SUPPORT)
metal_weak void *
rpmsg_virtio_shm_pool_get_buffer(struct rpmsg_virtio_shm_pool *shpool,
				 size_t size)
{
	void *buffer;

	if (!shpool || size == 0 || shpool->avail < size)
		return NULL;
	buffer = (char *)shpool->base + shpool->size - shpool->avail;
	shpool->avail -= size;

	return buffer;
}
#endif

void rpmsg_virtio_init_shm_pool(struct rpmsg_virtio_shm_pool *shpool,
				void *shb, size_t size)
{
	if (!shpool || !shb || size == 0)
		return;
	shpool->base = shb;
	shpool->size = size;
	shpool->avail = size;
}

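/*
 * Usage sketch (illustrative only; the shared memory base address and size
 * are hypothetical): an application owning the shared memory region can
 * carve Tx buffers out of it through the pool API above:
 *
 *	static struct rpmsg_virtio_shm_pool shpool;
 *
 *	rpmsg_virtio_init_shm_pool(&shpool, (void *)0x90000000, 0x40000);
 *	void *buf = rpmsg_virtio_shm_pool_get_buffer(&shpool, RPMSG_BUFFER_SIZE);
 *	// buf == NULL once the pool is exhausted
 */
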
/**
 * @internal
 *
 * @brief Places the used buffer back on the virtqueue.
 *
 * @param rvdev		Pointer to rpmsg virtio device
 * @param buffer	Buffer pointer
 * @param len		Buffer length
 * @param idx		Buffer index
 */
static void rpmsg_virtio_return_buffer(struct rpmsg_virtio_device *rvdev,
				       void *buffer, uint32_t len,
				       uint16_t idx)
{
	int ret;

	BUFFER_INVALIDATE(buffer, len);

	if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev)) {
		struct virtqueue_buf vqbuf;

		(void)idx;
		/* Initialize buffer node */
		vqbuf.buf = buffer;
		vqbuf.len = len;
		ret = virtqueue_add_buffer(rvdev->rvq, &vqbuf, 0, 1, buffer);
		RPMSG_ASSERT(ret == VQUEUE_SUCCESS, "add buffer failed\r\n");
	}

	if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev)) {
		(void)buffer;
		ret = virtqueue_add_consumed_buffer(rvdev->rvq, idx, len);
		RPMSG_ASSERT(ret == VQUEUE_SUCCESS, "add consumed buffer failed\r\n");
	}
}

/**
 * @internal
 *
 * @brief Places buffer on the virtqueue for consumption by the other side.
 *
 * @param rvdev		Pointer to rpmsg virtio device
 * @param buffer	Buffer pointer
 * @param len		Buffer length
 * @param idx		Buffer index
 *
 * @return Status of function execution
 */
static int rpmsg_virtio_enqueue_buffer(struct rpmsg_virtio_device *rvdev,
				       void *buffer, uint32_t len,
				       uint16_t idx)
{
	BUFFER_FLUSH(buffer, len);

	if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev)) {
		struct virtqueue_buf vqbuf;
		(void)idx;

		/* Initialize buffer node */
		vqbuf.buf = buffer;
		vqbuf.len = len;
		return virtqueue_add_buffer(rvdev->svq, &vqbuf, 1, 0, buffer);
	}

	if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev)) {
		(void)buffer;
		return virtqueue_add_consumed_buffer(rvdev->svq, idx, len);
	}

	return 0;
}

/**
 * @internal
 *
 * @brief Provides buffer to transmit messages.
 *
 * @param rvdev	Pointer to rpmsg virtio device
 * @param len	Length of returned buffer
 * @param idx	Buffer index
 *
 * @return Pointer to buffer.
 */
static void *rpmsg_virtio_get_tx_buffer(struct rpmsg_virtio_device *rvdev,
					uint32_t *len, uint16_t *idx)
{
	struct metal_list *node;
	struct vbuff_reclaimer_t *r_desc;
	void *data = NULL;

	/* First try to recycle a buffer that was freed without being used */
	node = metal_list_first(&rvdev->reclaimer);
	if (node) {
		r_desc = metal_container_of(node, struct vbuff_reclaimer_t, node);
		metal_list_del(node);
		data = r_desc;
		*idx = r_desc->idx;

		if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev))
			*len = rvdev->config.h2r_buf_size;
		if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev))
			*len = virtqueue_get_buffer_length(rvdev->svq, *idx);
	} else if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev)) {
		data = virtqueue_get_buffer(rvdev->svq, len, idx);
		if (!data && rvdev->svq->vq_free_cnt) {
			data = rpmsg_virtio_shm_pool_get_buffer(rvdev->shpool,
					rvdev->config.h2r_buf_size);
			*len = rvdev->config.h2r_buf_size;
			*idx = 0;
		}
	} else if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev)) {
		data = virtqueue_get_available_buffer(rvdev->svq, idx, len);
	}

	return data;
}

/**
 * @internal
 *
 * @brief Retrieves the received buffer from the virtqueue.
 *
 * @param rvdev	Pointer to rpmsg virtio device
 * @param len	Size of received buffer
 * @param idx	Index of buffer
 *
 * @return Pointer to received buffer
 */
static void *rpmsg_virtio_get_rx_buffer(struct rpmsg_virtio_device *rvdev,
					uint32_t *len, uint16_t *idx)
{
	void *data = NULL;

	if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev))
		data = virtqueue_get_buffer(rvdev->rvq, len, idx);

	if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev))
		data = virtqueue_get_available_buffer(rvdev->rvq, idx, len);

	/* Invalidate the buffer before returning it */
	if (data)
		BUFFER_INVALIDATE(data, *len);

	return data;
}

/**
 * @internal
 *
 * @brief Check if the remote is ready to start RPMsg communication
 *
 * @param rvdev Pointer to rpmsg_virtio device
 *
 * @return 0 on success, otherwise error code.
 */
static int rpmsg_virtio_wait_remote_ready(struct rpmsg_virtio_device *rvdev)
{
	uint8_t status;
	int ret;

	/* Busy wait until the remote is ready */
	while (1) {
		ret = virtio_get_status(rvdev->vdev, &status);
		if (ret)
			return ret;
		if (status & VIRTIO_CONFIG_STATUS_NEEDS_RESET) {
			ret = virtio_set_status(rvdev->vdev, 0);
			if (ret)
				return ret;
			/* TODO notify remote processor */
		} else if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) {
			return 0;
		}
		metal_yield();
	}
}

/**
 * @internal
 *
 * @brief Check whether rpmsg buffer needs to be released or not
 *
 * @param rp_hdr	Pointer to rpmsg buffer header
 *
 * @return true indicates this buffer needs to be released
 */
static bool rpmsg_virtio_buf_held_dec_test(struct rpmsg_hdr *rp_hdr)
{
	/* Check the held counter first */
	if (RPMSG_BUF_HELD_COUNTER(rp_hdr) <= 0) {
		metal_err("unexpected buffer held counter\r\n");
		return false;
	}

	/* Decrease the held counter */
	RPMSG_BUF_HELD_DEC(rp_hdr);

	/* Check whether to release the buffer */
	if (RPMSG_BUF_HELD_COUNTER(rp_hdr) > 0)
		return false;

	return true;
}

static void rpmsg_virtio_hold_rx_buffer(struct rpmsg_device *rdev, void *rxbuf)
{
	metal_mutex_acquire(&rdev->lock);
	RPMSG_BUF_HELD_INC(RPMSG_LOCATE_HDR(rxbuf));
	metal_mutex_release(&rdev->lock);
}

static bool rpmsg_virtio_release_rx_buffer_nolock(struct rpmsg_virtio_device *rvdev,
						  struct rpmsg_hdr *rp_hdr)
{
	uint16_t idx;
	uint32_t len;

	/* The reserved field contains the buffer index */
	idx = RPMSG_BUF_INDEX(rp_hdr);
	/* Return buffer on virtqueue. */
	len = virtqueue_get_buffer_length(rvdev->rvq, idx);
	rpmsg_virtio_return_buffer(rvdev, rp_hdr, len, idx);

	return true;
}

static void rpmsg_virtio_release_rx_buffer(struct rpmsg_device *rdev,
					   void *rxbuf)
{
	struct rpmsg_virtio_device *rvdev;
	struct rpmsg_hdr *rp_hdr;

	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);
	rp_hdr = RPMSG_LOCATE_HDR(rxbuf);

	metal_mutex_acquire(&rdev->lock);
	if (rpmsg_virtio_buf_held_dec_test(rp_hdr)) {
		rpmsg_virtio_release_rx_buffer_nolock(rvdev, rp_hdr);
		/* Tell peer we returned an rx buffer */
		virtqueue_kick(rvdev->rvq);
	}
	metal_mutex_release(&rdev->lock);
}

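/*
 * Usage sketch (illustrative only): an application endpoint callback can
 * defer processing of a received buffer by holding it, then releasing it
 * later from another context. The process_later() helper is hypothetical.
 *
 *	static int app_ept_cb(struct rpmsg_endpoint *ept, void *data,
 *			      size_t len, uint32_t src, void *priv)
 *	{
 *		rpmsg_hold_rx_buffer(ept, data);   // keep buffer after return
 *		process_later(ept, data, len);     // hypothetical deferral
 *		return RPMSG_SUCCESS;
 *	}
 *
 *	// ...later, once processing is done:
 *	// rpmsg_release_rx_buffer(ept, data);
 */
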
static int rpmsg_virtio_notify_wait(struct rpmsg_virtio_device *rvdev, struct virtqueue *vq)
{
	struct virtio_vring_info *vring_info;

	vring_info = &rvdev->vdev->vrings_info[vq->vq_queue_index];

	if (!rvdev->notify_wait_cb)
		return RPMSG_EOPNOTSUPP;

	return rvdev->notify_wait_cb(&rvdev->rdev, vring_info->notifyid);
}

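/*
 * Sketch of a notify-wait callback (illustrative only): instead of the
 * default 1 ms polling sleep, an application can block on an OS primitive
 * until the peer signals that buffers were consumed. The semaphore calls
 * below are hypothetical placeholders for the host OS API; the callback is
 * assumed to be assigned to rvdev->notify_wait_cb after initialization.
 *
 *	static int app_notify_wait(struct rpmsg_device *rdev, uint32_t id)
 *	{
 *		(void)rdev;
 *		(void)id;
 *		return app_sem_take(&tx_kick_sem, APP_TIMEOUT_MS) ?
 *		       RPMSG_ERR_NO_BUFF : RPMSG_SUCCESS;
 *	}
 */
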
static void *rpmsg_virtio_get_tx_payload_buffer(struct rpmsg_device *rdev,
						uint32_t *len, int wait)
{
	struct rpmsg_virtio_device *rvdev;
	struct rpmsg_hdr *rp_hdr;
	uint8_t virtio_status;
	uint16_t idx;
	int tick_count;
	int status;

	/* Get the associated remote device for channel. */
	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);

	/* Validate device state */
	status = virtio_get_status(rvdev->vdev, &virtio_status);
	if (status || !(virtio_status & VIRTIO_CONFIG_STATUS_DRIVER_OK))
		return NULL;

	if (wait)
		tick_count = RPMSG_TICK_COUNT / RPMSG_TICKS_PER_INTERVAL;
	else
		tick_count = 0;

	while (1) {
		/* Lock the device to enable exclusive access to virtqueues */
		metal_mutex_acquire(&rdev->lock);
		rp_hdr = rpmsg_virtio_get_tx_buffer(rvdev, len, &idx);
		metal_mutex_release(&rdev->lock);
		if (rp_hdr || !tick_count)
			break;

		/*
		 * Try the wait loop implemented in the virtio dispatcher,
		 * and fall back to the metal_sleep_usec() method by default.
		 */
		status = rpmsg_virtio_notify_wait(rvdev, rvdev->rvq);
		if (status == RPMSG_EOPNOTSUPP) {
			metal_sleep_usec(RPMSG_TICKS_PER_INTERVAL);
			tick_count--;
		} else if (status != RPMSG_SUCCESS) {
			break;
		}
	}

	if (!rp_hdr)
		return NULL;

	/* Store the index in the reserved field, to be used when sending */
	rp_hdr->reserved = idx;

	/* Increase the held counter to hold this Tx buffer */
	RPMSG_BUF_HELD_INC(rp_hdr);

	/* Actual data buffer size is vring buffer size minus header length */
	*len -= sizeof(struct rpmsg_hdr);
	return RPMSG_LOCATE_DATA(rp_hdr);
}

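/*
 * Zero-copy Tx sketch (illustrative only): an application can reserve a Tx
 * buffer, fill it in place and hand it back with rpmsg_send_nocopy(), or
 * return an unused buffer with rpmsg_release_tx_buffer(). Error handling is
 * trimmed for brevity; build_payload() is a hypothetical filler.
 *
 *	uint32_t len;
 *	void *buf = rpmsg_get_tx_payload_buffer(ept, &len, 1);
 *
 *	if (buf) {
 *		int sz = build_payload(buf, len);
 *		if (sz > 0)
 *			rpmsg_send_nocopy(ept, buf, sz);
 *		else
 *			rpmsg_release_tx_buffer(ept, buf);
 *	}
 */
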
static int rpmsg_virtio_send_offchannel_nocopy(struct rpmsg_device *rdev,
					       uint32_t src, uint32_t dst,
					       const void *data, int len)
{
	struct rpmsg_virtio_device *rvdev;
	struct metal_io_region *io;
	struct rpmsg_hdr rp_hdr;
	struct rpmsg_hdr *hdr;
	uint32_t buff_len;
	uint16_t idx;
	int status;

	/* Get the associated remote device for channel. */
	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);

	hdr = RPMSG_LOCATE_HDR(data);
	/* The reserved field contains the buffer index */
	idx = hdr->reserved;

	/* Initialize RPMSG header. */
	rp_hdr.dst = dst;
	rp_hdr.src = src;
	rp_hdr.len = len;
	rp_hdr.reserved = 0;
	rp_hdr.flags = 0;

	/* Copy the header to the rpmsg buffer. */
	io = rvdev->shbuf_io;
	status = metal_io_block_write(io, metal_io_virt_to_offset(io, hdr),
				      &rp_hdr, sizeof(rp_hdr));
	RPMSG_ASSERT(status == sizeof(rp_hdr), "failed to write header\r\n");

	metal_mutex_acquire(&rdev->lock);

	if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev))
		buff_len = rvdev->config.h2r_buf_size;
	else
		buff_len = virtqueue_get_buffer_length(rvdev->svq, idx);

	/* Enqueue buffer on virtqueue. */
	status = rpmsg_virtio_enqueue_buffer(rvdev, hdr, buff_len, idx);
	RPMSG_ASSERT(status == VQUEUE_SUCCESS, "failed to enqueue buffer\r\n");
	/* Let the other side know that there is a job to process. */
	virtqueue_kick(rvdev->svq);

	metal_mutex_release(&rdev->lock);

	return len;
}

static int rpmsg_virtio_release_tx_buffer(struct rpmsg_device *rdev, void *txbuf)
{
	struct rpmsg_virtio_device *rvdev;
	struct rpmsg_hdr *rp_hdr = RPMSG_LOCATE_HDR(txbuf);
	void *vbuff = rp_hdr;  /* only used to avoid a warning on the cast of a packed structure */
	struct vbuff_reclaimer_t *r_desc = (struct vbuff_reclaimer_t *)vbuff;

	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);

	metal_mutex_acquire(&rdev->lock);

	/* Check whether to release the Tx buffer */
	if (rpmsg_virtio_buf_held_dec_test(rp_hdr)) {
		/*
		 * Reuse the RPMsg buffer to temporarily store the
		 * vbuff_reclaimer_t structure. Store the index locally before
		 * overwriting the RPMsg header.
		 */
		r_desc->idx = RPMSG_BUF_INDEX(rp_hdr);
		metal_list_add_tail(&rvdev->reclaimer, &r_desc->node);
	}

	metal_mutex_release(&rdev->lock);

	return RPMSG_SUCCESS;
}

/**
 * @internal
 *
 * @brief Sends an rpmsg "message" to the remote device.
 *
 * @param rdev	Pointer to rpmsg device
 * @param src	Source address of channel
 * @param dst	Destination address of channel
 * @param data	Data to transmit
 * @param len	Size of data
 * @param wait	Boolean, whether to wait for a buffer to become
 *		available
 *
 * @return Size of data sent or negative value for failure.
 */
static int rpmsg_virtio_send_offchannel_raw(struct rpmsg_device *rdev,
					    uint32_t src, uint32_t dst,
					    const void *data,
					    int len, int wait)
{
	struct rpmsg_virtio_device *rvdev;
	struct metal_io_region *io;
	uint32_t buff_len;
	void *buffer;
	int status;

	/* Get the associated remote device for channel. */
	rvdev = metal_container_of(rdev, struct rpmsg_virtio_device, rdev);

	/* Get the payload buffer. */
	buffer = rpmsg_virtio_get_tx_payload_buffer(rdev, &buff_len, wait);
	if (!buffer)
		return RPMSG_ERR_NO_BUFF;

	/* Copy data to the rpmsg buffer, truncating it to the buffer size. */
	if (len > (int)buff_len)
		len = buff_len;
	io = rvdev->shbuf_io;
	status = metal_io_block_write(io, metal_io_virt_to_offset(io, buffer),
				      data, len);
	RPMSG_ASSERT(status == len, "failed to write buffer\r\n");

	return rpmsg_virtio_send_offchannel_nocopy(rdev, src, dst, buffer, len);
}

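/*
 * Copying-send sketch (illustrative only): most applications reach this path
 * through the generic endpoint API rather than calling the raw routine
 * directly. With an endpoint bound to a remote service:
 *
 *	static const char msg[] = "hello";
 *
 *	int ret = rpmsg_send(ept, msg, sizeof(msg));
 *	if (ret < 0)
 *		;  // no buffer available or device not ready
 */
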
/**
 * @internal
 *
 * @brief Tx callback function.
 *
 * @param vq	Pointer to virtqueue on which Tx has been
 *		completed.
 */
static void rpmsg_virtio_tx_callback(struct virtqueue *vq)
{
	(void)vq;
}

/**
 * @internal
 *
 * @brief Rx callback function.
 *
 * @param vq	Pointer to virtqueue on which a message has been received
 */
static void rpmsg_virtio_rx_callback(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vq_dev;
	struct rpmsg_virtio_device *rvdev = vdev->priv;
	struct rpmsg_device *rdev = &rvdev->rdev;
	struct rpmsg_endpoint *ept;
	struct rpmsg_hdr *rp_hdr;
	bool release = false;
	uint32_t len;
	uint16_t idx;
	int status;

	while (1) {
		/* Process the received data from remote node */
		metal_mutex_acquire(&rdev->lock);
		rp_hdr = rpmsg_virtio_get_rx_buffer(rvdev, &len, &idx);

		/* No more filled rx buffers */
		if (!rp_hdr) {
			if (VIRTIO_ENABLED(VQ_RX_EMPTY_NOTIFY) && release)
				/* Tell peer we returned some rx buffers */
				virtqueue_kick(rvdev->rvq);
			metal_mutex_release(&rdev->lock);
			break;
		}

		rp_hdr->reserved = idx;

		/* Get the channel node from the remote device channels list. */
		ept = rpmsg_get_ept_from_addr(rdev, rp_hdr->dst);
		rpmsg_ept_incref(ept);
		RPMSG_BUF_HELD_INC(rp_hdr);
		metal_mutex_release(&rdev->lock);

		if (ept) {
			if (ept->dest_addr == RPMSG_ADDR_ANY) {
				/*
				 * First message received from the remote side,
				 * update channel destination address
				 */
				ept->dest_addr = rp_hdr->src;
			}
			status = ept->cb(ept, RPMSG_LOCATE_DATA(rp_hdr),
					 rp_hdr->len, rp_hdr->src, ept->priv);

			RPMSG_ASSERT(status >= 0,
				     "unexpected callback status\r\n");
		}

		metal_mutex_acquire(&rdev->lock);
		rpmsg_ept_decref(ept);
		if (rpmsg_virtio_buf_held_dec_test(rp_hdr)) {
			rpmsg_virtio_release_rx_buffer_nolock(rvdev, rp_hdr);
			if (VIRTIO_ENABLED(VQ_RX_EMPTY_NOTIFY))
				/* Kick will be sent only when the last buffer is released */
				release = true;
			else
				/* Tell peer we returned an rx buffer */
				virtqueue_kick(rvdev->rvq);
		}
		metal_mutex_release(&rdev->lock);
	}
}

/**
 * @internal
 *
 * @brief This callback handles name service announcements from the remote
 * device and creates/deletes rpmsg channels.
 *
 * @param ept	Pointer to server channel control block.
 * @param data	Pointer to received messages
 * @param len	Length of received data
 * @param src	Source address
 * @param priv	Any private data
 *
 * @return Rpmsg endpoint callback handled
 */
static int rpmsg_virtio_ns_callback(struct rpmsg_endpoint *ept, void *data,
				    size_t len, uint32_t src, void *priv)
{
	struct rpmsg_device *rdev = priv;
	struct rpmsg_virtio_device *rvdev = metal_container_of(rdev,
							       struct rpmsg_virtio_device,
							       rdev);
	struct metal_io_region *io = rvdev->shbuf_io;
	struct rpmsg_endpoint *_ept;
	struct rpmsg_ns_msg *ns_msg;
	uint32_t dest;
	bool ept_to_release;
	char name[RPMSG_NAME_SIZE];

	(void)ept;
	(void)src;

	ns_msg = data;
	if (len != sizeof(*ns_msg))
		/* Return as the message is corrupted */
		return RPMSG_SUCCESS;
	metal_io_block_read(io,
			    metal_io_virt_to_offset(io, ns_msg->name),
			    &name, sizeof(name));
	dest = ns_msg->addr;

	/* Check if an endpoint has been locally registered */
	metal_mutex_acquire(&rdev->lock);
	_ept = rpmsg_get_endpoint(rdev, name, RPMSG_ADDR_ANY, dest);

	/*
	 * If the ept-release callback is not implemented, ns_unbind_cb() can
	 * free the ept. Test _ept->release_cb before calling the
	 * ns_unbind_cb() callbacks.
	 */
	ept_to_release = _ept && _ept->release_cb;

	if (ns_msg->flags & RPMSG_NS_DESTROY) {
		if (_ept)
			_ept->dest_addr = RPMSG_ADDR_ANY;
		if (ept_to_release)
			rpmsg_ept_incref(_ept);
		metal_mutex_release(&rdev->lock);
		if (_ept && _ept->ns_unbind_cb)
			_ept->ns_unbind_cb(_ept);
		if (rdev->ns_unbind_cb)
			rdev->ns_unbind_cb(rdev, name, dest);
		if (ept_to_release) {
			metal_mutex_acquire(&rdev->lock);
			rpmsg_ept_decref(_ept);
			metal_mutex_release(&rdev->lock);
		}
	} else {
		if (!_ept) {
			/*
			 * Send a callback to the application, which can:
			 * - create the associated endpoint.
			 * - store the information for future use.
			 * - simply ignore the request if the service is not supported.
			 */
			metal_mutex_release(&rdev->lock);
			if (rdev->ns_bind_cb)
				rdev->ns_bind_cb(rdev, name, dest);
		} else {
			_ept->dest_addr = dest;
			metal_mutex_release(&rdev->lock);
		}
	}

	return RPMSG_SUCCESS;
}

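/*
 * Name-service bind sketch (illustrative only): an application passes a
 * callback like this to rpmsg_init_vdev(); when the remote announces a new
 * service, the callback can create the matching endpoint. app_ept and
 * app_rx_cb are hypothetical.
 *
 *	static void app_ns_bind_cb(struct rpmsg_device *rdev,
 *				   const char *name, uint32_t dest)
 *	{
 *		rpmsg_create_ept(&app_ept, rdev, name, RPMSG_ADDR_ANY, dest,
 *				 app_rx_cb, NULL);
 *	}
 */
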
int rpmsg_virtio_get_tx_buffer_size(struct rpmsg_device *rdev)
{
	struct rpmsg_virtio_device *rvdev;
	int size = 0;

	if (!rdev)
		return RPMSG_ERR_PARAM;

	metal_mutex_acquire(&rdev->lock);
	rvdev = (struct rpmsg_virtio_device *)rdev;

	if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev)) {
		/*
		 * If this core is the host, buffers are provided by us,
		 * so just report the configured size.
		 */
		size = rvdev->config.h2r_buf_size - sizeof(struct rpmsg_hdr);
	}

	if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev)) {
		/*
		 * If the other core is the host, buffers are provided by it,
		 * so get the buffer size from the virtqueue.
		 */
		size = (int)virtqueue_get_desc_size(rvdev->svq) -
		       sizeof(struct rpmsg_hdr);
	}

	if (size <= 0)
		size = RPMSG_ERR_NO_BUFF;

	metal_mutex_release(&rdev->lock);

	return size;
}

int rpmsg_virtio_get_rx_buffer_size(struct rpmsg_device *rdev)
{
	struct rpmsg_virtio_device *rvdev;
	int size = 0;

	if (!rdev)
		return RPMSG_ERR_PARAM;

	metal_mutex_acquire(&rdev->lock);
	rvdev = (struct rpmsg_virtio_device *)rdev;

	if (VIRTIO_ROLE_IS_DRIVER(rvdev->vdev)) {
		/*
		 * If this core is the host, buffers are provided by us,
		 * so just report the configured size.
		 */
		size = rvdev->config.r2h_buf_size - sizeof(struct rpmsg_hdr);
	}

	if (VIRTIO_ROLE_IS_DEVICE(rvdev->vdev)) {
		/*
		 * If the other core is the host, buffers are provided by it,
		 * so get the buffer size from the virtqueue.
		 */
		size = (int)virtqueue_get_desc_size(rvdev->rvq) -
		       sizeof(struct rpmsg_hdr);
	}

	if (size <= 0)
		size = RPMSG_ERR_NO_BUFF;

	metal_mutex_release(&rdev->lock);

	return size;
}

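/*
 * Usage sketch (illustrative only): the maximum payload per message is the
 * vring buffer size minus the rpmsg header, as returned by the two getters
 * above. With the default RPMSG_BUFFER_SIZE of 512 bytes and a 16-byte
 * header, that is 496 bytes per message.
 *
 *	int max_tx = rpmsg_virtio_get_tx_buffer_size(rdev);
 *	int max_rx = rpmsg_virtio_get_rx_buffer_size(rdev);
 *	if (max_tx < 0 || max_rx < 0)
 *		;  // no buffer available or invalid device
 */
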
int rpmsg_init_vdev(struct rpmsg_virtio_device *rvdev,
		    struct virtio_device *vdev,
		    rpmsg_ns_bind_cb ns_bind_cb,
		    struct metal_io_region *shm_io,
		    struct rpmsg_virtio_shm_pool *shpool)
{
	return rpmsg_init_vdev_with_config(rvdev, vdev, ns_bind_cb, shm_io,
			   shpool, RPMSG_VIRTIO_DEFAULT_CONFIG);
}

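/*
 * Initialization sketch (illustrative only): on the host side, platform
 * code typically provides the virtio device and the shared-memory I/O
 * region, then initializes the buffer pool and the rpmsg virtio device.
 * platform_get_vdev(), platform_get_shm_io(), app_ns_bind_cb and the
 * SHM_POOL_BASE/SHM_POOL_SIZE values are hypothetical.
 *
 *	static struct rpmsg_virtio_device rvdev;
 *	static struct rpmsg_virtio_shm_pool shpool;
 *
 *	struct virtio_device *vdev = platform_get_vdev();
 *	struct metal_io_region *shm_io = platform_get_shm_io();
 *
 *	rpmsg_virtio_init_shm_pool(&shpool, SHM_POOL_BASE, SHM_POOL_SIZE);
 *	if (rpmsg_init_vdev(&rvdev, vdev, app_ns_bind_cb, shm_io, &shpool))
 *		;  // initialization failed
 */
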
int rpmsg_init_vdev_with_config(struct rpmsg_virtio_device *rvdev,
				struct virtio_device *vdev,
				rpmsg_ns_bind_cb ns_bind_cb,
				struct metal_io_region *shm_io,
				struct rpmsg_virtio_shm_pool *shpool,
				const struct rpmsg_virtio_config *config)
{
	struct rpmsg_device *rdev;
	const char *vq_names[RPMSG_NUM_VRINGS];
	vq_callback callback[RPMSG_NUM_VRINGS];
	uint32_t features;
	int status;
	unsigned int i;

	if (!rvdev || !vdev || !shm_io)
		return RPMSG_ERR_PARAM;

	rdev = &rvdev->rdev;
	rvdev->notify_wait_cb = NULL;
	memset(rdev, 0, sizeof(*rdev));
	metal_mutex_init(&rdev->lock);
	rvdev->vdev = vdev;
	rdev->ns_bind_cb = ns_bind_cb;
	vdev->priv = rvdev;
	rdev->ops.send_offchannel_raw = rpmsg_virtio_send_offchannel_raw;
	rdev->ops.hold_rx_buffer = rpmsg_virtio_hold_rx_buffer;
	rdev->ops.release_rx_buffer = rpmsg_virtio_release_rx_buffer;
	rdev->ops.get_tx_payload_buffer = rpmsg_virtio_get_tx_payload_buffer;
	rdev->ops.send_offchannel_nocopy = rpmsg_virtio_send_offchannel_nocopy;
	rdev->ops.release_tx_buffer = rpmsg_virtio_release_tx_buffer;
	rdev->ops.get_rx_buffer_size = rpmsg_virtio_get_rx_buffer_size;
	rdev->ops.get_tx_buffer_size = rpmsg_virtio_get_tx_buffer_size;

	if (VIRTIO_ROLE_IS_DRIVER(vdev)) {
		/*
		 * The virtio configuration contains only options applicable to
		 * a virtio driver, implying the rpmsg host role.
		 */
		if (config == NULL)
			return RPMSG_ERR_PARAM;
		rvdev->config = *config;
	}

	if (VIRTIO_ROLE_IS_DEVICE(vdev)) {
		/* Wait for synchronization with the host */
		status = rpmsg_virtio_wait_remote_ready(rvdev);
		if (status)
			return status;
	}

	status = virtio_get_features(vdev, &features);
	if (status)
		return status;
	rdev->support_ns = !!(features & (1 << VIRTIO_RPMSG_F_NS));

	if (VIRTIO_ROLE_IS_DRIVER(vdev)) {
		/*
		 * The virtio driver (RPMsg host) manages the shared buffers,
		 * so create a shared memory pool to handle them.
		 */
		rvdev->shpool = config->split_shpool ? shpool + 1 : shpool;
		if (!shpool)
			return RPMSG_ERR_PARAM;
		if (!shpool->size || !rvdev->shpool->size)
			return RPMSG_ERR_NO_BUFF;

		vq_names[0] = "rx_vq";
		vq_names[1] = "tx_vq";
		callback[0] = rpmsg_virtio_rx_callback;
		callback[1] = rpmsg_virtio_tx_callback;
	}

	if (VIRTIO_ROLE_IS_DEVICE(vdev)) {
		vq_names[0] = "tx_vq";
		vq_names[1] = "rx_vq";
		callback[0] = rpmsg_virtio_tx_callback;
		callback[1] = rpmsg_virtio_rx_callback;
	}

	rvdev->shbuf_io = shm_io;
	metal_list_init(&rvdev->reclaimer);

	/* Create virtqueues for remote device */
	status = virtio_create_virtqueues(vdev, 0, RPMSG_NUM_VRINGS,
					  vq_names, callback, NULL);
	if (status != RPMSG_SUCCESS)
		return status;

	/* Virtqueue creation succeeded, assign back the virtqueues */
	if (VIRTIO_ROLE_IS_DRIVER(vdev)) {
		rvdev->rvq  = vdev->vrings_info[0].vq;
		rvdev->svq  = vdev->vrings_info[1].vq;
	}

	if (VIRTIO_ROLE_IS_DEVICE(vdev)) {
		rvdev->rvq  = vdev->vrings_info[1].vq;
		rvdev->svq  = vdev->vrings_info[0].vq;
	}

	/*
	 * Suppress "tx-complete" interrupts, since the send method uses a
	 * busy loop when the buffer pool is exhausted.
	 */
	virtqueue_disable_cb(rvdev->svq);

	/* TODO: could have a virtio function to set the shared memory I/O */
	for (i = 0; i < RPMSG_NUM_VRINGS; i++) {
		struct virtqueue *vq;

		vq = vdev->vrings_info[i].vq;
		vq->shm_io = shm_io;
	}

	if (VIRTIO_ROLE_IS_DRIVER(vdev)) {
		struct virtqueue_buf vqbuf;
		unsigned int idx;
		void *buffer;

		vqbuf.len = rvdev->config.r2h_buf_size;
		for (idx = 0; idx < rvdev->rvq->vq_nentries; idx++) {
			/* Initialize TX virtqueue buffers for remote device */
			buffer = rpmsg_virtio_shm_pool_get_buffer(shpool,
					rvdev->config.r2h_buf_size);

			if (!buffer) {
				status = RPMSG_ERR_NO_BUFF;
				goto err;
			}

			vqbuf.buf = buffer;

			metal_io_block_set(shm_io,
					   metal_io_virt_to_offset(shm_io,
								   buffer),
					   0x00, rvdev->config.r2h_buf_size);
			status = virtqueue_add_buffer(rvdev->rvq, &vqbuf, 0, 1,
						      buffer);
			if (status != RPMSG_SUCCESS)
				goto err;
		}
	}

	/* Initialize channels and endpoints list */
	metal_list_init(&rdev->endpoints);

	/*
	 * Create the name service announcement endpoint if the device
	 * supports the name service announcement feature.
	 */
	if (rdev->support_ns) {
		rpmsg_register_endpoint(rdev, &rdev->ns_ept, "NS",
				     RPMSG_NS_EPT_ADDR, RPMSG_NS_EPT_ADDR,
				     rpmsg_virtio_ns_callback, NULL, rvdev);
	}

	if (VIRTIO_ROLE_IS_DRIVER(vdev)) {
		status = virtio_set_status(vdev, VIRTIO_CONFIG_STATUS_DRIVER_OK);
		if (status)
			goto err;
	}

	return RPMSG_SUCCESS;

err:
	virtio_delete_virtqueues(vdev);
	return status;
}

void rpmsg_deinit_vdev(struct rpmsg_virtio_device *rvdev)
{
	struct metal_list *node;
	struct rpmsg_device *rdev;
	struct rpmsg_endpoint *ept;

	if (rvdev) {
		rdev = &rvdev->rdev;
		while (!metal_list_is_empty(&rdev->endpoints)) {
			node = rdev->endpoints.next;
			ept = metal_container_of(node, struct rpmsg_endpoint, node);
			rpmsg_destroy_ept(ept);
		}

		rvdev->rvq = NULL;
		rvdev->svq = NULL;

		virtio_delete_virtqueues(rvdev->vdev);
		metal_mutex_deinit(&rdev->lock);
	}
}