/*
 * Copyright (c) 2014, Mentor Graphics Corporation
 * Copyright (c) 2015 Xilinx, Inc.
 * Copyright (c) 2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2024 NXP
 * Copyright 2021 ACRIOS Systems s.r.o.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived from this
 *    software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "rpmsg_lite.h"
#include "rpmsg_platform.h"

/* Interface used to interact with the virtqueue layer. A different set of
 * callbacks is used when the local processor is the MASTER and when it is
 * the REMOTE.
 */
struct virtqueue_ops
{
    void (*vq_tx)(struct virtqueue *vq, void *buffer, uint32_t len, uint16_t idx);
    void *(*vq_tx_alloc)(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
    void *(*vq_rx)(struct virtqueue *vq, uint32_t *len, uint16_t *idx);
    void (*vq_rx_free)(struct virtqueue *vq, void *buffer, uint32_t len, uint16_t idx);
};
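
/* A minimal sketch of how this ops table is wired (names taken from this
 * file; the instance handle is assumed to exist): each rpmsg_lite instance
 * holds a pointer to one of the two constant tables defined later, chosen
 * by the init role, and all TX/RX paths go through that table.
 *
 * @code
 * rpmsg_lite_dev->vq_ops = &master_vq_ops;  // set in rpmsg_lite_master_init()
 * rpmsg_lite_dev->vq_ops = &remote_vq_ops;  // set in rpmsg_lite_remote_init()
 * // e.g. the TX path:
 * // buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &len, &idx);
 * @endcode
 */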

/* Zero-Copy extension macros */
#define RPMSG_STD_MSG_FROM_BUF(buf) (struct rpmsg_std_msg *)(void *)((char *)(buf) - offsetof(struct rpmsg_std_msg, data))
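
/* Illustrative sketch of the pointer arithmetic (RL_BLOCK is assumed to be
 * the blocking-timeout constant from rpmsg_lite.h; the zero-copy allocator
 * is defined later in this file): given the payload pointer handed to an
 * endpoint callback or returned by rpmsg_lite_alloc_tx_buffer(), the macro
 * steps back over struct rpmsg_std_hdr to recover the enclosing message.
 *
 * @code
 * uint32_t size = 0U;
 * void *payload = rpmsg_lite_alloc_tx_buffer(rpmsg_lite_dev, &size, RL_BLOCK);
 * struct rpmsg_std_msg *msg = RPMSG_STD_MSG_FROM_BUF(payload);
 * // msg->data == payload; msg->hdr is the 16-byte header just before it
 * @endcode
 */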

#if !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1))
/* Check RL_BUFFER_COUNT and RL_BUFFER_SIZE only when RL_ALLOW_CUSTOM_SHMEM_CONFIG is not set to 1 */
#if (!RL_BUFFER_COUNT) || (RL_BUFFER_COUNT & (RL_BUFFER_COUNT - 1))
#error "RL_BUFFER_COUNT must be a power of two (2, 4, ...)"
#endif

/* Buffer is formed by the payload and struct rpmsg_std_hdr */
#define RL_BUFFER_SIZE (RL_BUFFER_PAYLOAD_SIZE + 16UL)

#if (!RL_BUFFER_SIZE) || (RL_BUFFER_SIZE & (RL_BUFFER_SIZE - 1))
#error \
    "RL_BUFFER_SIZE must be a power of two (256, 512, ...), i.e. "\
    "RL_BUFFER_PAYLOAD_SIZE must be equal to (240, 496, 1008, ...) [2^n - 16]."
#endif
#endif /* !(defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)) */
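
/* Worked example of the constraint above, assuming the 16-byte
 * struct rpmsg_std_hdr used by this file: a payload size of 496 yields a
 * 512-byte buffer, which is a power of two, so both checks pass.
 *
 * @code
 * #define RL_BUFFER_PAYLOAD_SIZE (496U)
 * // RL_BUFFER_SIZE = 496 + 16 = 512 = 2^9  -> OK
 * // A payload size of 500 would give 516, not a power of two -> #error
 * @endcode
 */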

/*!
 * @brief
 * Traverse the linked list of endpoints to get the one with the defined address.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param addr Local endpoint address
 *
 * @return RL_NULL if not found, node pointer containing the ept on success
 *
 */
static struct llist *rpmsg_lite_get_endpoint_from_addr(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t addr)
{
    struct llist *rl_ept_lut_head;

    rl_ept_lut_head = rpmsg_lite_dev->rl_endpoints;
    while (rl_ept_lut_head != RL_NULL)
    {
        struct rpmsg_lite_endpoint *rl_ept = (struct rpmsg_lite_endpoint *)rl_ept_lut_head->data;
        if (rl_ept->addr == addr)
        {
            return rl_ept_lut_head;
        }
        rl_ept_lut_head = rl_ept_lut_head->next;
    }
    return RL_NULL;
}

/***************************************************************
 mmm mm m m mmmmm mm mmm m m mmmm
 m" " ## # # # # ## m" " # m" #" "
 # # # # # #mmmm" # # # #m# "#mmm
 # #mm# # # # # #mm# # # #m "#
 "mmm" # # #mmmmm #mmmmm #mmmm" # # "mmm" # "m "mmm#"
 ****************************************************************/

/*!
 * @brief
 * Called when the remote side calls virtqueue_kick()
 * on its transmit virtqueue.
 * In this callback, the buffers are read out
 * of the rvq and the user callback is called.
 *
 * @param vq Virtqueue affected by the kick
 *
 */
static void rpmsg_lite_rx_callback(struct virtqueue *vq)
{
    struct rpmsg_std_msg *rpmsg_msg;
    uint32_t len;
    uint16_t idx;
    struct rpmsg_lite_endpoint *ept;
    int32_t cb_ret;
    struct llist *node;
    struct rpmsg_lite_instance *rpmsg_lite_dev = (struct rpmsg_lite_instance *)vq->priv;
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
    uint32_t rx_freed = RL_FALSE;
#endif

    RL_ASSERT(rpmsg_lite_dev != RL_NULL);

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_lock_mutex(rpmsg_lite_dev->lock);
#endif

    /* Process the received data from the remote node */
    rpmsg_msg = (struct rpmsg_std_msg *)rpmsg_lite_dev->vq_ops->vq_rx(rpmsg_lite_dev->rvq, &len, &idx);

    while (rpmsg_msg != RL_NULL)
    {
        node = rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, rpmsg_msg->hdr.dst);

        cb_ret = RL_RELEASE;
        if (node != RL_NULL)
        {
            ept = (struct rpmsg_lite_endpoint *)node->data;
            cb_ret = ept->rx_cb(rpmsg_msg->data, rpmsg_msg->hdr.len, rpmsg_msg->hdr.src, ept->rx_cb_data);
        }

        if (cb_ret == RL_HOLD)
        {
            rpmsg_msg->hdr.reserved.idx = idx;
        }
        else
        {
            rpmsg_lite_dev->vq_ops->vq_rx_free(rpmsg_lite_dev->rvq, rpmsg_msg, len, idx);
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
            rx_freed = RL_TRUE;
#endif
        }
        rpmsg_msg = (struct rpmsg_std_msg *)rpmsg_lite_dev->vq_ops->vq_rx(rpmsg_lite_dev->rvq, &len, &idx);
#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
        if ((rpmsg_msg == RL_NULL) && (rx_freed == RL_TRUE))
        {
            /* Let the remote device know that some buffers have been freed */
            virtqueue_kick(rpmsg_lite_dev->rvq);
        }
#endif
    }

#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_unlock_mutex(rpmsg_lite_dev->lock);
#endif
}

/*!
 * @brief
 * Called when the remote side calls virtqueue_kick()
 * on its receive virtqueue.
 *
 * @param vq Virtqueue affected by the kick
 *
 */
static void rpmsg_lite_tx_callback(struct virtqueue *vq)
{
    struct rpmsg_lite_instance *rpmsg_lite_dev = (struct rpmsg_lite_instance *)vq->priv;

    RL_ASSERT(rpmsg_lite_dev != RL_NULL);
    rpmsg_lite_dev->link_state = 1U;
    env_tx_callback(rpmsg_lite_dev->link_id);
}

/****************************************************************************

 m m mmmm m m mm mm m mmmm m mmmmm mm m mmm
 "m m" m" "m # # ## #"m # # "m # # #"m # m" "
 # # # # #mmmm# # # # #m # # # # # # #m # # mm
 "mm" # # # # #mm# # # # # # # # # # # # #
 ## #mm#" # # # # # ## #mmm" #mmmmm mm#mm # ## "mmm"
 #
 In case this processor has the REMOTE role
*****************************************************************************/
/*!
 * @brief
 * Places the buffer on the virtqueue for consumption by the other side.
 *
 * @param tvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Buffer length
 * @param idx    Buffer index
 *
 */
static void vq_tx_remote(struct virtqueue *tvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
    status = virtqueue_add_consumed_buffer(tvq, idx, len);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here. */
}

/*!
 * @brief
 * Provides a buffer to transmit messages.
 *
 * @param tvq Virtqueue to use
 * @param len Length of the returned buffer
 * @param idx Buffer index
 *
 * @return Pointer to the buffer.
 */
static void *vq_tx_alloc_remote(struct virtqueue *tvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_available_buffer(tvq, idx, len);
}

/*!
 * @brief
 * Retrieves the received buffer from the virtqueue.
 *
 * @param rvq Virtqueue to use
 * @param len Size of received buffer
 * @param idx Index of buffer
 *
 * @return Pointer to received buffer
 *
 */
static void *vq_rx_remote(struct virtqueue *rvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_available_buffer(rvq, idx, len);
}

/*!
 * @brief
 * Places the used buffer back on the virtqueue.
 *
 * @param rvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Size of received buffer
 * @param idx    Index of buffer
 *
 */
static void vq_rx_free_remote(struct virtqueue *rvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
#if defined(RL_CLEAR_USED_BUFFERS) && (RL_CLEAR_USED_BUFFERS == 1)
    env_memset(buffer, 0x00, len);
#endif
    status = virtqueue_add_consumed_buffer(rvq, idx, len);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */
    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here. */
}

/****************************************************************************

 m m mmmm m m mm mm m mmmm m mmmmm mm m mmm
 "m m" m" "m # # ## #"m # # "m # # #"m # m" "
 # # # # #mmmm# # # # #m # # # # # # #m # # mm
 "mm" # # # # #mm# # # # # # # # # # # # #
 ## #mm#" # # # # # ## #mmm" #mmmmm mm#mm # ## "mmm"
 #
 In case this processor has the MASTER role
*****************************************************************************/

/*!
 * @brief
 * Places the buffer on the virtqueue for consumption by the other side.
 *
 * @param tvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Buffer length
 * @param idx    Buffer index
 *
 */
static void vq_tx_master(struct virtqueue *tvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
    status = virtqueue_add_buffer(tvq, idx);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here. */
}

/*!
 * @brief
 * Provides a buffer to transmit messages.
 *
 * @param tvq Virtqueue to use
 * @param len Length of the returned buffer
 * @param idx Buffer index
 *
 * @return Pointer to the buffer.
 */
static void *vq_tx_alloc_master(struct virtqueue *tvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_buffer(tvq, len, idx);
}

/*!
 * @brief
 * Retrieves the received buffer from the virtqueue.
 *
 * @param rvq Virtqueue to use
 * @param len Size of received buffer
 * @param idx Index of buffer
 *
 * @return Pointer to received buffer
 *
 */
static void *vq_rx_master(struct virtqueue *rvq, uint32_t *len, uint16_t *idx)
{
    return virtqueue_get_buffer(rvq, len, idx);
}

/*!
 * @brief
 * Places the used buffer back on the virtqueue.
 *
 * @param rvq    Virtqueue to use
 * @param buffer Buffer pointer
 * @param len    Size of received buffer
 * @param idx    Index of buffer
 *
 */
static void vq_rx_free_master(struct virtqueue *rvq, void *buffer, uint32_t len, uint16_t idx)
{
    int32_t status;
#if defined(RL_CLEAR_USED_BUFFERS) && (RL_CLEAR_USED_BUFFERS == 1)
    env_memset(buffer, 0x00, len);
#endif
    status = virtqueue_add_buffer(rvq, idx);
    RL_ASSERT(status == VQUEUE_SUCCESS); /* must succeed here */

    /* As long as the length of the virtqueue ring buffer is not shorter
     * than the number of buffers in the pool, this function should not fail.
     * This condition is always met, so we don't need to return anything here. */
}

/* Interface used in case this processor is MASTER */
static const struct virtqueue_ops master_vq_ops = {
    vq_tx_master,
    vq_tx_alloc_master,
    vq_rx_master,
    vq_rx_free_master,
};

/* Interface used in case this processor is REMOTE */
static const struct virtqueue_ops remote_vq_ops = {
    vq_tx_remote,
    vq_tx_alloc_remote,
    vq_rx_remote,
    vq_rx_free_remote,
};

/* Helper function for virtqueue notification */
static void virtqueue_notify(struct virtqueue *vq)
{
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    struct rpmsg_lite_instance *inst = vq->priv;
    platform_notify(inst->env ? env_get_platform_context(inst->env) : RL_NULL, vq->vq_queue_index);
#else
    platform_notify(vq->vq_queue_index);
#endif
}

/*************************************************

 mmmmmm mmmmm mmmmmmm mm m mmmmmmm m
 # # "# # #"m # # # # #
 #mmmmm #mmm#" # # #m # #mmmmm" #"# #
 # # # # # # # ## ##"
 #mmmmm # # # ## #mmmmm # #

**************************************************/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                                  uint32_t addr,
                                                  rl_ept_rx_cb_t rx_cb,
                                                  void *rx_cb_data,
                                                  struct rpmsg_lite_ept_static_context *ept_context)
#else
struct rpmsg_lite_endpoint *rpmsg_lite_create_ept(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                                  uint32_t addr,
                                                  rl_ept_rx_cb_t rx_cb,
                                                  void *rx_cb_data)
#endif
{
    struct rpmsg_lite_endpoint *rl_ept;
    struct llist *node;
    uint32_t i;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }

    env_lock_mutex(rpmsg_lite_dev->lock);
    {
        if (addr == RL_ADDR_ANY)
        {
            /* find the lowest free address */
            for (i = 1; i < 0xFFFFFFFFU; i++)
            {
                if (rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, i) == RL_NULL)
                {
                    addr = i;
                    break;
                }
            }
            if (addr == RL_ADDR_ANY)
            {
                /* no address is free, cannot happen normally */
                env_unlock_mutex(rpmsg_lite_dev->lock);
                return RL_NULL;
            }
        }
        else
        {
            if (rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, addr) != RL_NULL)
            {
                /* Already exists! */
                env_unlock_mutex(rpmsg_lite_dev->lock);
                return RL_NULL;
            }
        }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        if (ept_context == RL_NULL)
        {
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }

        rl_ept = &(ept_context->ept);
        node = &(ept_context->node);
#else
        rl_ept = env_allocate_memory(sizeof(struct rpmsg_lite_endpoint));
        if (rl_ept == RL_NULL)
        {
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
        node = env_allocate_memory(sizeof(struct llist));
        if (node == RL_NULL)
        {
            env_free_memory(rl_ept);
            env_unlock_mutex(rpmsg_lite_dev->lock);
            return RL_NULL;
        }
#endif /* RL_USE_STATIC_API */

        env_memset(rl_ept, 0x00, sizeof(struct rpmsg_lite_endpoint));

        rl_ept->addr = addr;
        rl_ept->rx_cb = rx_cb;
        rl_ept->rx_cb_data = rx_cb_data;

        node->data = rl_ept;

        add_to_list((struct llist **)&rpmsg_lite_dev->rl_endpoints, node);
    }
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return rl_ept;
}
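
/* A minimal usage sketch (illustrative only: my_ept_cb and the address 30U
 * are assumptions, the instance handle is assumed to exist, and the 4-argument
 * form applies when RL_USE_STATIC_API is not enabled): the callback runs from
 * rpmsg_lite_rx_callback() above, and returning RL_RELEASE lets the buffer be
 * recycled immediately after the callback returns.
 *
 * @code
 * static int32_t my_ept_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv)
 * {
 *     // Consume the data here; it is only valid until RL_RELEASE is returned.
 *     return RL_RELEASE;
 * }
 *
 * struct rpmsg_lite_endpoint *ept =
 *     rpmsg_lite_create_ept(rpmsg_lite_dev, 30U, my_ept_cb, RL_NULL);
 * @endcode
 */
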
/*************************************************

 mmmmmm mmmmm mmmmmmm mmmm mmmmmm m
 # # "# # # "m # #
 #mmmmm #mmm#" # # # #mmmmm #
 # # # # # # #
 #mmmmm # # #mmm" #mmmmm #mmmmm

**************************************************/

int32_t rpmsg_lite_destroy_ept(struct rpmsg_lite_instance *rpmsg_lite_dev, struct rpmsg_lite_endpoint *rl_ept)
{
    struct llist *node;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (rl_ept == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    env_lock_mutex(rpmsg_lite_dev->lock);
    node = rpmsg_lite_get_endpoint_from_addr(rpmsg_lite_dev, rl_ept->addr);
    if (node != RL_NULL)
    {
        remove_from_list((struct llist **)&rpmsg_lite_dev->rl_endpoints, node);
        env_unlock_mutex(rpmsg_lite_dev->lock);
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(node);
        env_free_memory(rl_ept);
#endif
        return RL_SUCCESS;
    }
    else
    {
        env_unlock_mutex(rpmsg_lite_dev->lock);
        return RL_ERR_PARAM;
    }
}

/******************************************

 mmmmmmm m m mm mmmmm mmmmm
 # # # ## # "# #
 # ## # # #mmm#" #
 # m""m #mm# # #
 # m" "m # # # mm#mm

*******************************************/

uint32_t rpmsg_lite_is_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return 0U;
    }

    return (RL_TRUE == rpmsg_lite_dev->link_state ? RL_TRUE : RL_FALSE);
}

uint32_t rpmsg_lite_wait_for_link_up(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t timeout)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return 0U;
    }

    return env_wait_for_link_up(&rpmsg_lite_dev->link_state, rpmsg_lite_dev->link_id, timeout);
}

/*!
 * @brief
 * Internal function to format an RPMsg-compatible
 * message and send it.
 *
 * @param rpmsg_lite_dev RPMsg Lite instance
 * @param src Local endpoint address
 * @param dst Remote endpoint address
 * @param data Payload buffer
 * @param size Size of payload, in bytes
 * @param flags Value of flags field
 * @param timeout Timeout in ms, 0 if nonblocking
 *
 * @return Status of function execution, RL_SUCCESS on success
 *
 */
static int32_t rpmsg_lite_format_message(struct rpmsg_lite_instance *rpmsg_lite_dev,
                                         uint32_t src,
                                         uint32_t dst,
                                         char *data,
                                         uint32_t size,
                                         uint32_t flags,
                                         uint32_t timeout)
{
    struct rpmsg_std_msg *rpmsg_msg;
    void *buffer;
    uint16_t idx;
    uint32_t tick_count = 0U;
    uint32_t buff_len;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (data == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        return RL_NOT_READY;
    }

    /* Lock the device to enable exclusive access to virtqueues */
    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Get rpmsg buffer for sending message. */
    buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &buff_len, &idx);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    if ((buffer == RL_NULL) && (timeout == RL_FALSE))
    {
        return RL_ERR_NO_MEM;
    }

    while (buffer == RL_NULL)
    {
        env_sleep_msec(RL_MS_PER_INTERVAL);
        env_lock_mutex(rpmsg_lite_dev->lock);
        buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, &buff_len, &idx);
        env_unlock_mutex(rpmsg_lite_dev->lock);
        tick_count += (uint32_t)RL_MS_PER_INTERVAL;
        if ((tick_count >= timeout) && (buffer == RL_NULL))
        {
            return RL_ERR_NO_MEM;
        }
    }

    rpmsg_msg = (struct rpmsg_std_msg *)buffer;

    /* Initialize RPMSG header. */
    rpmsg_msg->hdr.dst = dst;
    rpmsg_msg->hdr.src = src;
    rpmsg_msg->hdr.len = (uint16_t)size;
    rpmsg_msg->hdr.flags = (uint16_t)(flags & 0xFFFFU);

    /* Copy data to rpmsg buffer. */
    env_memcpy(rpmsg_msg->data, data, size);

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Enqueue buffer on virtqueue. */
    rpmsg_lite_dev->vq_ops->vq_tx(rpmsg_lite_dev->tvq, buffer, buff_len, idx);
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rpmsg_lite_dev->tvq);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}

int32_t rpmsg_lite_send(struct rpmsg_lite_instance *rpmsg_lite_dev,
                        struct rpmsg_lite_endpoint *ept,
                        uint32_t dst,
                        char *data,
                        uint32_t size,
                        uintptr_t timeout)
{
    if (ept == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    // FIXME: maybe just copy data up to the buffer length and Tx it.
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_platform_shmem_config_t shmem_config;
    (void)platform_get_custom_shmem_config(rpmsg_lite_dev->link_id, &shmem_config);
    if (size > (uint32_t)shmem_config.buffer_payload_size)
#else
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE)
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    {
        return RL_ERR_BUFF_SIZE;
    }

    return rpmsg_lite_format_message(rpmsg_lite_dev, ept->addr, dst, data, size, RL_NO_FLAGS, timeout);
}
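
/* A short usage sketch (illustrative; the destination address 30U is an
 * assumption and RL_BLOCK is assumed to be the blocking-timeout constant
 * from rpmsg_lite.h): the payload is copied into a shared-memory buffer by
 * rpmsg_lite_format_message() above, so "msg" may live on the stack.
 *
 * @code
 * char msg[] = "hello";
 * int32_t ret = rpmsg_lite_send(rpmsg_lite_dev, ept, 30U, msg, sizeof(msg), RL_BLOCK);
 * if (ret != RL_SUCCESS)
 * {
 *     // RL_ERR_BUFF_SIZE, RL_ERR_NO_MEM, RL_NOT_READY or RL_ERR_PARAM
 * }
 * @endcode
 */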

#if defined(RL_API_HAS_ZEROCOPY) && (RL_API_HAS_ZEROCOPY == 1)

void *rpmsg_lite_alloc_tx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, uint32_t *size, uintptr_t timeout)
{
    struct rpmsg_std_msg *rpmsg_msg;
    void *buffer;
    uint16_t idx;
    uint32_t tick_count = 0U;

    if (size == RL_NULL)
    {
        return RL_NULL;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        *size = 0;
        return RL_NULL;
    }

    /* Lock the device to enable exclusive access to virtqueues */
    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Get rpmsg buffer for sending message. */
    buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, size, &idx);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    if ((buffer == RL_NULL) && (timeout == RL_FALSE))
    {
        *size = 0;
        return RL_NULL;
    }

    while (buffer == RL_NULL)
    {
        env_sleep_msec(RL_MS_PER_INTERVAL);
        env_lock_mutex(rpmsg_lite_dev->lock);
        buffer = rpmsg_lite_dev->vq_ops->vq_tx_alloc(rpmsg_lite_dev->tvq, size, &idx);
        env_unlock_mutex(rpmsg_lite_dev->lock);
        tick_count += (uint32_t)RL_MS_PER_INTERVAL;
        if ((tick_count >= timeout) && (buffer == RL_NULL))
        {
            *size = 0;
            return RL_NULL;
        }
    }

    rpmsg_msg = (struct rpmsg_std_msg *)buffer;

    /* Keep the idx and total length information for the nocopy tx function */
    rpmsg_msg->hdr.reserved.idx = idx;

    /* Return the maximum payload size */
    *size -= sizeof(struct rpmsg_std_hdr);

    return rpmsg_msg->data;
}

int32_t rpmsg_lite_send_nocopy(struct rpmsg_lite_instance *rpmsg_lite_dev,
                               struct rpmsg_lite_endpoint *ept,
                               uint32_t dst,
                               void *data,
                               uint32_t size)
{
    struct rpmsg_std_msg *rpmsg_msg;
    uint32_t src;

    if ((ept == RL_NULL) || (data == RL_NULL))
    {
        return RL_ERR_PARAM;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_platform_shmem_config_t shmem_config;
    (void)platform_get_custom_shmem_config(rpmsg_lite_dev->link_id, &shmem_config);
    if (size > (uint32_t)shmem_config.buffer_payload_size)
#else
    if (size > (uint32_t)RL_BUFFER_PAYLOAD_SIZE)
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    {
        return RL_ERR_BUFF_SIZE;
    }

    if (rpmsg_lite_dev->link_state != RL_TRUE)
    {
        return RL_NOT_READY;
    }

    src = ept->addr;

    rpmsg_msg = RPMSG_STD_MSG_FROM_BUF(data);

#if defined(RL_DEBUG_CHECK_BUFFERS) && (RL_DEBUG_CHECK_BUFFERS == 1)
    /* Check that the to-be-sent buffer is in the VirtIO ring descriptors list */
    int32_t idx = rpmsg_lite_dev->tvq->vq_nentries - 1;
    while ((idx >= 0) && (rpmsg_lite_dev->tvq->vq_ring.desc[idx].addr != (uint64_t)rpmsg_msg))
    {
        idx--;
    }
    RL_ASSERT(idx >= 0);
#endif

    /* Initialize RPMSG header. */
    rpmsg_msg->hdr.dst = dst;
    rpmsg_msg->hdr.src = src;
    rpmsg_msg->hdr.len = (uint16_t)size;
    rpmsg_msg->hdr.flags = (uint16_t)(RL_NO_FLAGS & 0xFFFFU);

    env_lock_mutex(rpmsg_lite_dev->lock);
    /* Enqueue buffer on virtqueue. */
    rpmsg_lite_dev->vq_ops->vq_tx(
        rpmsg_lite_dev->tvq, (void *)rpmsg_msg,
        (uint32_t)virtqueue_get_buffer_length(rpmsg_lite_dev->tvq, rpmsg_msg->hdr.reserved.idx),
        rpmsg_msg->hdr.reserved.idx);
    /* Let the other side know that there is a job to process. */
    virtqueue_kick(rpmsg_lite_dev->tvq);
    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}
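
/* A zero-copy TX sketch (illustrative; the destination address 30U is an
 * assumption and RL_BLOCK is assumed from rpmsg_lite.h): the buffer returned
 * by rpmsg_lite_alloc_tx_buffer() lives directly in shared memory, so the
 * payload is written in place and rpmsg_lite_send_nocopy() performs no copy.
 * The buffer must not be touched after the send call.
 *
 * @code
 * uint32_t size = 0U;
 * void *tx_buf = rpmsg_lite_alloc_tx_buffer(rpmsg_lite_dev, &size, RL_BLOCK);
 * if (tx_buf != RL_NULL)
 * {
 *     env_memcpy(tx_buf, "hello", 6U); // fill at most 'size' bytes in place
 *     (void)rpmsg_lite_send_nocopy(rpmsg_lite_dev, ept, 30U, tx_buf, 6U);
 *     // tx_buf now belongs to the virtqueue; do not reuse it
 * }
 * @endcode
 */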

/******************************************

 mmmmm m m mm mmmmm mmmmm
 # "# # # ## # "# #
 #mmmm" ## # # #mmm#" #
 # "m m""m #mm# # #
 # " m" "m # # # mm#mm

*******************************************/

int32_t rpmsg_lite_release_rx_buffer(struct rpmsg_lite_instance *rpmsg_lite_dev, void *rxbuf)
{
    struct rpmsg_std_msg *rpmsg_msg;

    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }
    if (rxbuf == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    rpmsg_msg = RPMSG_STD_MSG_FROM_BUF(rxbuf);

#if defined(RL_DEBUG_CHECK_BUFFERS) && (RL_DEBUG_CHECK_BUFFERS == 1)
    /* Check that the to-be-released buffer is in the VirtIO ring descriptors list */
    int32_t idx = rpmsg_lite_dev->rvq->vq_nentries - 1;
    while ((idx >= 0) && (rpmsg_lite_dev->rvq->vq_ring.desc[idx].addr != (uint64_t)rpmsg_msg))
    {
        idx--;
    }
    RL_ASSERT(idx >= 0);
#endif

    env_lock_mutex(rpmsg_lite_dev->lock);

    /* Return used buffer, with total length (header length + buffer size). */
    rpmsg_lite_dev->vq_ops->vq_rx_free(
        rpmsg_lite_dev->rvq, rpmsg_msg,
        (uint32_t)virtqueue_get_buffer_length(rpmsg_lite_dev->rvq, rpmsg_msg->hdr.reserved.idx),
        rpmsg_msg->hdr.reserved.idx);

#if defined(RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION) && (RL_ALLOW_CONSUMED_BUFFERS_NOTIFICATION == 1)
    /* Let the remote device know that a buffer has been freed */
    virtqueue_kick(rpmsg_lite_dev->rvq);
#endif

    env_unlock_mutex(rpmsg_lite_dev->lock);

    return RL_SUCCESS;
}
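
/* A zero-copy RX sketch (illustrative; g_deferred_buf is a hypothetical
 * application-level variable): returning RL_HOLD from the endpoint callback
 * keeps the buffer out of the virtqueue (rpmsg_lite_rx_callback() stores the
 * buffer index in the header for later), and the application returns it with
 * rpmsg_lite_release_rx_buffer() once it is done.
 *
 * @code
 * static int32_t my_hold_cb(void *payload, uint32_t payload_len, uint32_t src, void *priv)
 * {
 *     g_deferred_buf = payload;  // hypothetical application storage
 *     return RL_HOLD;            // defer; buffer stays valid after return
 * }
 *
 * // Later, outside the callback context:
 * (void)rpmsg_lite_release_rx_buffer(rpmsg_lite_dev, g_deferred_buf);
 * @endcode
 */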

#endif /* RL_API_HAS_ZEROCOPY */

/******************************

 mmmmm mm m mmmmm mmmmmmm
 # #"m # # #
 # # #m # # #
 # # # # # #
 mm#mm # ## mm#mm #

*****************************/
#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
                                                   size_t shmem_length,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   struct rpmsg_lite_instance *static_context)
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_master_init(
    void *shmem_addr, size_t shmem_length, uint32_t link_id, uint32_t init_flags, void *env_cfg)
#else
struct rpmsg_lite_instance *rpmsg_lite_master_init(void *shmem_addr,
                                                   size_t shmem_length,
                                                   uint32_t link_id,
                                                   uint32_t init_flags)
#endif
{
    int32_t status;
    void (*callback[2])(struct virtqueue *vq);
    const char *vq_names[2];
    struct vring_alloc_info ring_info;
    struct virtqueue *vqs[2] = {0};
    void *buffer;
    uint32_t idx, j;
    struct rpmsg_lite_instance *rpmsg_lite_dev = RL_NULL;

    if (link_id > RL_PLATFORM_HIGHEST_LINK_ID)
    {
        return RL_NULL;
    }

    if (shmem_addr == RL_NULL)
    {
        return RL_NULL;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    /* Get the custom shmem configuration defined per each rpmsg_lite instance
       (i.e. per each link id) from the platform layer */
    rpmsg_platform_shmem_config_t shmem_config = {0};
    if (RL_SUCCESS != platform_get_custom_shmem_config(link_id, &shmem_config))
    {
        return RL_NULL;
    }

    /* shmem_config.buffer_count must be a power of two (2, 4, ...) */
    if (0U != (shmem_config.buffer_count & (shmem_config.buffer_count - 1U)))
    {
        return RL_NULL;
    }

    /* The buffer size must be a power of two (256, 512, ...) */
    if (0U != ((shmem_config.buffer_payload_size + 16UL) & ((shmem_config.buffer_payload_size + 16UL) - 1U)))
    {
        return RL_NULL;
    }

    if ((2U * (uint32_t)shmem_config.buffer_count) >
        ((RL_WORD_ALIGN_DOWN(shmem_length - 2U * shmem_config.vring_size)) /
         (uint32_t)(shmem_config.buffer_payload_size + 16UL)))
    {
        return RL_NULL;
    }
#else
    if ((2U * (uint32_t)RL_BUFFER_COUNT) >
        ((RL_WORD_ALIGN_DOWN(shmem_length - (uint32_t)RL_VRING_OVERHEAD)) / (uint32_t)RL_BUFFER_SIZE))
    {
        return RL_NULL;
    }

#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (static_context == RL_NULL)
    {
        return RL_NULL;
    }
    rpmsg_lite_dev = static_context;
#else
    rpmsg_lite_dev = env_allocate_memory(sizeof(struct rpmsg_lite_instance));
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }
#endif

    env_memset(rpmsg_lite_dev, 0, sizeof(struct rpmsg_lite_instance));
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    status = env_init(&rpmsg_lite_dev->env, env_cfg);
#else
    status = env_init();
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev); /* coco validated: not able to force the application to reach this line */
#endif
        return RL_NULL; /* coco validated: not able to force the application to reach this line */
    }

    rpmsg_lite_dev->link_id = link_id;

    /*
     * Since this device is the RPMSG master, it needs to manage the
     * shared buffers. Create a shared memory pool to handle the buffers.
     */
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_lite_dev->sh_mem_base =
        (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + 2U * shmem_config.vring_size);
    rpmsg_lite_dev->sh_mem_remaining = (RL_WORD_ALIGN_DOWN(shmem_length - 2U * shmem_config.vring_size)) /
                                       (uint32_t)(shmem_config.buffer_payload_size + 16UL);
#else
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + (uint32_t)RL_VRING_OVERHEAD);
    rpmsg_lite_dev->sh_mem_remaining =
        (RL_WORD_ALIGN_DOWN(shmem_length - (uint32_t)RL_VRING_OVERHEAD)) / (uint32_t)RL_BUFFER_SIZE;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
    rpmsg_lite_dev->sh_mem_total = rpmsg_lite_dev->sh_mem_remaining;

    /* Initialize names and callbacks */
    vq_names[0] = "rx_vq";
    vq_names[1] = "tx_vq";
    callback[0] = rpmsg_lite_rx_callback;
    callback[1] = rpmsg_lite_tx_callback;
    rpmsg_lite_dev->vq_ops = &master_vq_ops;

    /* Create virtqueue for each vring. */
    for (idx = 0U; idx < 2U; idx++)
    {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
        ring_info.phy_addr = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                              (uint32_t)((idx == 0U) ? (0U) : (shmem_config.vring_size)));
        ring_info.align = shmem_config.vring_align;
        ring_info.num_descs = shmem_config.buffer_count;
#else
        ring_info.phy_addr = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                              (uint32_t)((idx == 0U) ? (0U) : (VRING_SIZE)));
        ring_info.align = VRING_ALIGN;
        ring_info.num_descs = RL_BUFFER_COUNT;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

        env_memset((void *)ring_info.phy_addr, 0x00, (uint32_t)vring_size(ring_info.num_descs, ring_info.align));

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        status = virtqueue_create_static((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info,
                                         callback[idx], virtqueue_notify, &vqs[idx],
                                         (struct vq_static_context *)&rpmsg_lite_dev->vq_ctxt[idx]);
#else
        status = virtqueue_create((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info, callback[idx],
                                  virtqueue_notify, &vqs[idx]);
#endif /* RL_USE_STATIC_API */

        if (status == RL_SUCCESS)
        {
            /* Initialize vring control block in virtqueue. */
            vq_ring_init(vqs[idx]);

            /* Disable callbacks - will be enabled by the application
             * once initialization is completed.
             */
            virtqueue_disable_cb(vqs[idx]);
        }
        else
        {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
            /* Free all already allocated memory for virtqueues */
            for (uint32_t a = 0U; a < 2U; a++)
            {
                if (RL_NULL != vqs[a])
                {
                    virtqueue_free(vqs[a]);
                }
            }
            env_free_memory(rpmsg_lite_dev);
#endif
            return RL_NULL;
        }

        /* virtqueue has reference to the RPMsg Lite instance */
        vqs[idx]->priv = (void *)rpmsg_lite_dev;
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        vqs[idx]->env = rpmsg_lite_dev->env;
#endif
    }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1, &rpmsg_lite_dev->lock_static_ctxt);
#else
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1);
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        /* Free all already allocated memory for virtqueues */
        for (uint32_t b = 0U; b < 2U; b++)
        {
            virtqueue_free(vqs[b]);
        }
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }

    // FIXME: find a better way to handle this - tx for master is rx for remote and vice versa.
    rpmsg_lite_dev->tvq = vqs[1];
    rpmsg_lite_dev->rvq = vqs[0];

    for (j = 0U; j < 2U; j++)
    {
        for (idx = 0U; ((idx < vqs[j]->vq_nentries) && (idx < rpmsg_lite_dev->sh_mem_total)); idx++)
        {
            /* Initialize TX virtqueue buffers for the remote device */
            buffer = (rpmsg_lite_dev->sh_mem_remaining > 0U) ?
                         (rpmsg_lite_dev->sh_mem_base +
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                          (uint32_t)(shmem_config.buffer_payload_size + 16UL) *
                              (rpmsg_lite_dev->sh_mem_total - rpmsg_lite_dev->sh_mem_remaining--)) :
#else
                          (uint32_t)RL_BUFFER_SIZE *
                              (rpmsg_lite_dev->sh_mem_total - rpmsg_lite_dev->sh_mem_remaining--)) :
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
                         (RL_NULL);

            RL_ASSERT(buffer != RL_NULL);

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
            env_memset(buffer, 0x00, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
            env_memset(buffer, 0x00, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            if (vqs[j] == rpmsg_lite_dev->rvq)
            {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                status =
                    virtqueue_fill_avail_buffers(vqs[j], buffer, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
                status = virtqueue_fill_avail_buffers(vqs[j], buffer, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            }
            else if (vqs[j] == rpmsg_lite_dev->tvq)
            {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
                status =
                    virtqueue_fill_used_buffers(vqs[j], buffer, (uint32_t)(shmem_config.buffer_payload_size + 16UL));
#else
                status = virtqueue_fill_used_buffers(vqs[j], buffer, (uint32_t)RL_BUFFER_SIZE);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */
            }
            else
            {
                /* coco begin validated: this branch will never be met unless RAM is corrupted */
            }

            if (status != RL_SUCCESS)
            {
                /* Clean up! */
                env_delete_mutex(rpmsg_lite_dev->lock);
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
                for (uint32_t c = 0U; c < 2U; c++)
                {
                    virtqueue_free(vqs[c]);
                }
                env_free_memory(rpmsg_lite_dev);
#endif
                return RL_NULL;
            }
            /* coco end */
        }
    }

    /* Install ISRs */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 1U;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_init_interrupt(rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    (void)platform_init_interrupt(rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 1U;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    /*
     * Let the remote device know that the Master is ready for
     * communication.
     */
    virtqueue_kick(rpmsg_lite_dev->rvq);

    return rpmsg_lite_dev;
}
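
/* A minimal master-side bring-up sketch (illustrative; rpmsg_sh_mem and
 * RL_PLATFORM_LINK_ID are board-specific assumptions, not names from this
 * file):
 *
 * @code
 * // Assumed: rpmsg_sh_mem is a linker-placed region visible to both cores,
 * // and RL_PLATFORM_LINK_ID comes from the platform layer of the port.
 * struct rpmsg_lite_instance *inst =
 *     rpmsg_lite_master_init((void *)rpmsg_sh_mem, sizeof(rpmsg_sh_mem),
 *                            RL_PLATFORM_LINK_ID, RL_NO_FLAGS);
 * if (inst == RL_NULL)
 * {
 *     // bad link id, NULL/undersized shmem, or allocation failure
 * }
 * @endcode
 */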

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   struct rpmsg_lite_instance *static_context)
#elif defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr,
                                                   uint32_t link_id,
                                                   uint32_t init_flags,
                                                   void *env_cfg)
#else
struct rpmsg_lite_instance *rpmsg_lite_remote_init(void *shmem_addr, uint32_t link_id, uint32_t init_flags)
#endif
{
    int32_t status;
    void (*callback[2])(struct virtqueue *vq);
    const char *vq_names[2];
    struct vring_alloc_info ring_info;
    struct virtqueue *vqs[2] = {0};
    uint32_t idx;
    struct rpmsg_lite_instance *rpmsg_lite_dev = RL_NULL;

    if (link_id > RL_PLATFORM_HIGHEST_LINK_ID)
    {
        return RL_NULL;
    }

    if (shmem_addr == RL_NULL)
    {
        return RL_NULL;
    }

#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    /* Get the custom shmem configuration defined per each rpmsg_lite instance
       (i.e. per each link id) from the platform layer */
    rpmsg_platform_shmem_config_t shmem_config = {0};
    if (RL_SUCCESS != platform_get_custom_shmem_config(link_id, &shmem_config))
    {
        return RL_NULL;
    }

    /* shmem_config.buffer_count must be a power of two (2, 4, ...) */
    if (0U != (shmem_config.buffer_count & (shmem_config.buffer_count - 1U)))
    {
        return RL_NULL;
    }

    /* The buffer size must be a power of two (256, 512, ...) */
    if (0U != ((shmem_config.buffer_payload_size + 16UL) & ((shmem_config.buffer_payload_size + 16UL) - 1U)))
    {
        return RL_NULL;
    }
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    if (static_context == RL_NULL)
    {
        return RL_NULL;
    }
    rpmsg_lite_dev = static_context;
#else
    rpmsg_lite_dev = env_allocate_memory(sizeof(struct rpmsg_lite_instance));
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_NULL;
    }
#endif

    env_memset(rpmsg_lite_dev, 0, sizeof(struct rpmsg_lite_instance));
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    status = env_init(&rpmsg_lite_dev->env, env_cfg);
#else
    status = env_init();
#endif

    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        env_free_memory(rpmsg_lite_dev); /* coco validated: not able to force the application to reach this line */
#endif
        return RL_NULL; /* coco validated: not able to force the application to reach this line */
    }

    rpmsg_lite_dev->link_id = link_id;

    vq_names[0] = "tx_vq"; /* swapped in case of remote */
    vq_names[1] = "rx_vq";
    callback[0] = rpmsg_lite_tx_callback;
    callback[1] = rpmsg_lite_rx_callback;
    rpmsg_lite_dev->vq_ops = &remote_vq_ops;
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
    rpmsg_lite_dev->sh_mem_base =
        (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + 2U * shmem_config.vring_size);
#else
    rpmsg_lite_dev->sh_mem_base = (char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr + (uint32_t)RL_VRING_OVERHEAD);
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

    /* Create virtqueue for each vring. */
    for (idx = 0U; idx < 2U; idx++)
    {
#if defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1)
        ring_info.phy_addr = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                              (uint32_t)((idx == 0U) ? (0U) : (shmem_config.vring_size)));
        ring_info.align = shmem_config.vring_align;
        ring_info.num_descs = shmem_config.buffer_count;
#else
        ring_info.phy_addr = (void *)(char *)((uintptr_t)(char *)RL_WORD_ALIGN_UP((uintptr_t)(char *)shmem_addr) +
                                              (uint32_t)((idx == 0U) ? (0U) : (VRING_SIZE)));
        ring_info.align = VRING_ALIGN;
        ring_info.num_descs = RL_BUFFER_COUNT;
#endif /* defined(RL_ALLOW_CUSTOM_SHMEM_CONFIG) && (RL_ALLOW_CUSTOM_SHMEM_CONFIG == 1) */

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
        status = virtqueue_create_static((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info,
                                         callback[idx], virtqueue_notify, &vqs[idx],
                                         (struct vq_static_context *)&rpmsg_lite_dev->vq_ctxt[idx]);
#else
        status = virtqueue_create((uint16_t)(RL_GET_VQ_ID(link_id, idx)), vq_names[idx], &ring_info, callback[idx],
                                  virtqueue_notify, &vqs[idx]);
#endif /* RL_USE_STATIC_API */

        if (status != RL_SUCCESS)
        {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
            /* Free all already allocated memory for virtqueues */
            for (uint32_t a = 0U; a < 2U; a++)
            {
                if (RL_NULL != vqs[a])
                {
                    virtqueue_free(vqs[a]);
                }
            }
            env_free_memory(rpmsg_lite_dev);
#endif
            return RL_NULL;
        }

        /* virtqueue has reference to the RPMsg Lite instance */
        vqs[idx]->priv = (void *)rpmsg_lite_dev;
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
        vqs[idx]->env = rpmsg_lite_dev->env;
#endif
    }

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1, &rpmsg_lite_dev->lock_static_ctxt);
#else
    status = env_create_mutex((LOCK *)&rpmsg_lite_dev->lock, 1);
#endif
    if (status != RL_SUCCESS)
    {
#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
        /* Free all already allocated memory for virtqueues */
        for (uint32_t b = 0U; b < 2U; b++)
        {
            virtqueue_free(vqs[b]);
        }
        env_free_memory(rpmsg_lite_dev);
#endif
        return RL_NULL;
    }

    // FIXME: find a better way to handle this - tx for master is rx for remote and vice versa.
    rpmsg_lite_dev->tvq = vqs[0];
    rpmsg_lite_dev->rvq = vqs[1];

    /* Install ISRs */
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    env_init_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_init_interrupt(rpmsg_lite_dev->rvq->vq_queue_index, rpmsg_lite_dev->rvq);
    (void)platform_init_interrupt(rpmsg_lite_dev->tvq->vq_queue_index, rpmsg_lite_dev->tvq);
    env_disable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_disable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
    rpmsg_lite_dev->link_state = 0;
    env_enable_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    env_enable_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif

    return rpmsg_lite_dev;
}
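
/* A minimal remote-side bring-up sketch (illustrative; rpmsg_sh_mem and
 * RL_PLATFORM_LINK_ID are board-specific assumptions, and RL_BLOCK is assumed
 * to be the blocking-timeout constant from rpmsg_lite.h): the remote starts
 * with link_state = 0 and waits for the master's first kick to raise it.
 *
 * @code
 * struct rpmsg_lite_instance *inst =
 *     rpmsg_lite_remote_init((void *)rpmsg_sh_mem, RL_PLATFORM_LINK_ID, RL_NO_FLAGS);
 * (void)rpmsg_lite_wait_for_link_up(inst, RL_BLOCK); // returns once the master kicks
 * @endcode
 */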

/*******************************************

 mmmm mmmmmm mmmmm mm m mmmmm mmmmmmm
 # "m # # #"m # # #
 # # #mmmmm # # #m # # #
 # # # # # # # # #
 #mmm" #mmmmm mm#mm # ## mm#mm #

********************************************/

int32_t rpmsg_lite_deinit(struct rpmsg_lite_instance *rpmsg_lite_dev)
{
    if (rpmsg_lite_dev == RL_NULL)
    {
        return RL_ERR_PARAM;
    }

    if (!((rpmsg_lite_dev->rvq != RL_NULL) && (rpmsg_lite_dev->tvq != RL_NULL) && (rpmsg_lite_dev->lock != RL_NULL)))
    {
        /* ERROR - trying to deinitialize an uninitialized RPMSG instance? */
        RL_ASSERT((rpmsg_lite_dev->rvq != RL_NULL) && (rpmsg_lite_dev->tvq != RL_NULL) &&
                  (rpmsg_lite_dev->lock != RL_NULL));
        return RL_ERR_PARAM;
    }
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    env_deinit_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->rvq->vq_queue_index);
    env_deinit_interrupt(rpmsg_lite_dev->env, rpmsg_lite_dev->tvq->vq_queue_index);
#else
    (void)platform_deinit_interrupt(rpmsg_lite_dev->rvq->vq_queue_index);
    (void)platform_deinit_interrupt(rpmsg_lite_dev->tvq->vq_queue_index);
#endif
    rpmsg_lite_dev->link_state = 0;

#if defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1)
    virtqueue_free_static(rpmsg_lite_dev->rvq);
    virtqueue_free_static(rpmsg_lite_dev->tvq);
#else
    virtqueue_free(rpmsg_lite_dev->rvq);
    virtqueue_free(rpmsg_lite_dev->tvq);
#endif /* RL_USE_STATIC_API */
    rpmsg_lite_dev->rvq = RL_NULL;
    rpmsg_lite_dev->tvq = RL_NULL;

    env_delete_mutex(rpmsg_lite_dev->lock);
#if defined(RL_USE_ENVIRONMENT_CONTEXT) && (RL_USE_ENVIRONMENT_CONTEXT == 1)
    (void)env_deinit(rpmsg_lite_dev->env);
#else
    (void)env_deinit();
#endif

#if !(defined(RL_USE_STATIC_API) && (RL_USE_STATIC_API == 1))
    env_free_memory(rpmsg_lite_dev);
#endif /* RL_USE_STATIC_API */

    return RL_SUCCESS;
}