1 /*
2 * Copyright (c) 2023 Nordic Semiconductor ASA
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /*
8 * ICBMsg backend.
9 *
10 * This is an IPC service backend that dynamically allocates buffers for data storage
11 * and uses ICMsg to send references to them.
12 *
13 * Shared memory organization
14 * --------------------------
15 *
16 * A single channel (RX or TX) of the shared memory is divided into two areas: the ICMsg
17 * area followed by the Blocks area. ICMsg is used to send and receive short 3-byte
18 * messages. The Blocks area is evenly divided into aligned blocks. Blocks are used to
19 * allocate buffers containing the actual data. A data buffer can span multiple blocks.
20 * The first block starts with the size of the data that follows.
21 *
22 * +------------+-------------+
23 * | ICMsg area | Blocks area |
24 * +------------+-------------+
25 * _______/ \_________________________________________
26 * / \
27 * +-----------+-----------+-----------+-----------+- -+-----------+
28 * | Block 0 | Block 1 | Block 2 | Block 3 | ... | Block N-1 |
29 * +-----------+-----------+-----------+-----------+- -+-----------+
30 * _____/ \_____
31 * / \
32 * +------+--------------------------------+---------+
33 * | size | data_buffer[size] ... | padding |
34 * +------+--------------------------------+---------+
35 *
36 * The sender keeps track of reserved blocks using a bitarray and is responsible
37 * for allocating and releasing the blocks. The receiver just tells the sender that it
38 * no longer needs a specific buffer.
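 *
 * For example, a minimal sketch (assuming a 32-byte block size and a 4-byte size
 * field, both of which are configuration dependent) of how a payload maps onto
 * blocks:
 *
 *     size_t block_size = 32;                         // size of one aligned block
 *     size_t total = payload_size + sizeof(uint32_t); // payload plus size field
 *     size_t num_blocks = (total + block_size - 1) / block_size;
 *     uint8_t *data = blocks_area + first_block * block_size + sizeof(uint32_t);
 *
 * A 100-byte payload therefore occupies 4 blocks and its data starts right after
 * the size field of the first block.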
39 *
40 * Control messages
41 * ----------------
42 *
43 * ICMsg is used to send and receive small 3-byte control messages.
44 *
45 * - Send data
46 * | MSG_DATA | endpoint address | block index |
47 * This message is used to send a data buffer to a specific endpoint.
48 *
49 * - Release data
50 * | MSG_RELEASE_DATA | 0 | block index |
51 * This message is a response to the "Send data" message and it is used to inform
52 * that a specific buffer is no longer used and can be released. The endpoint address
53 * does not matter here, so it is zero. An example exchange is shown after this list.
54 *
55 * - Bound endpoint
56 * | MSG_BOUND | endpoint address | block index |
57 * This message starts the bounding of the endpoint. The buffer contains a
58 * null-terminated endpoint name.
59 *
60 * - Release bound endpoint
61 * | MSG_RELEASE_BOUND | endpoint address | block index |
62 * This message is a response to the "Bound endpoint" message and it is used to inform
63 * that a specific buffer (starting at "block index") is no longer used and
64 * that the endpoint is bound and can now receive data.
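 *
 * For example (values are illustrative only), sending a data buffer that starts at
 * block 5 to endpoint 2 produces the 3-byte message:
 *
 *     | MSG_DATA (0) | 0x02 | 0x05 |
 *
 * and, once the receiver has consumed the buffer, it responds with:
 *
 *     | MSG_RELEASE_DATA (1) | 0x00 | 0x05 |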
65 *
66 * Bounding endpoints
67 * ------------------
68 *
69 * When ICMsg is bound and the user registers an endpoint on the initiator side, the
70 * backend sends "Bound endpoint". The endpoint address is assigned by the initiator.
71 * When the follower receives the message and the user on the follower side has also
72 * registered the same endpoint, the backend calls the "bound" callback and sends back
73 * "Release bound endpoint". The follower saves the endpoint address and its endpoint
74 * is ready to send and receive data. When the initiator receives "Release bound
75 * endpoint" or any data message, it calls the "bound" callback and is ready to send data.
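 *
 * For example, the complete handshake for an endpoint named "ep0" that gets
 * address 0 assigned by the initiator looks as follows:
 *
 *     initiator                                  follower
 *     ---------                                  --------
 *     register endpoint "ep0"
 *     allocate block, write "ep0\0" into it
 *     | MSG_BOUND | 0 | block index |   ---->    match name against local endpoints,
 *                                                call the "bound" callback
 *                                      <----     | MSG_RELEASE_BOUND | 0 | block index |
 *     release block, call the "bound" callback
 *     ...both sides may now exchange MSG_DATA and MSG_RELEASE_DATA...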
76 */
77
78 #undef _POSIX_C_SOURCE
79 #define _POSIX_C_SOURCE 200809L /* For strnlen() */
80
81 #include <string.h>
82
83 #include <zephyr/logging/log.h>
84 #include <zephyr/device.h>
85 #include <zephyr/sys/bitarray.h>
86 #include <zephyr/ipc/icmsg.h>
87 #include <zephyr/ipc/ipc_service_backend.h>
88 #include <zephyr/cache.h>
89
90 #if defined(CONFIG_ARCH_POSIX)
91 #include <soc.h>
92 #define MAYBE_CONST
93 #else
94 #define MAYBE_CONST const
95 #endif
96
97 LOG_MODULE_REGISTER(ipc_icbmsg,
98 CONFIG_IPC_SERVICE_BACKEND_ICBMSG_LOG_LEVEL);
99
100 #define DT_DRV_COMPAT zephyr_ipc_icbmsg
101
102 /** Allowed number of endpoints. */
103 #define NUM_EPT CONFIG_IPC_SERVICE_BACKEND_ICBMSG_NUM_EP
104
105 /** Special endpoint address indicating invalid (or empty) entry. */
106 #define EPT_ADDR_INVALID 0xFF
107
108 /** Special value for empty entry in bound message waiting table. */
109 #define WAITING_BOUND_MSG_EMPTY 0xFFFF
110
111 /** Size of the header (size field) of the block. */
112 #define BLOCK_HEADER_SIZE (sizeof(struct block_header))
113
114 /** Flag indicating that ICMsg was bound for this instance. */
115 #define CONTROL_BOUNDED BIT(31)
116
117 /** Registered endpoints count mask in flags. */
118 #define FLAG_EPT_COUNT_MASK 0xFFFF
119
120 /** Workqueue stack size for bounding processing (this configuration is not optimized). */
121 #define EP_BOUND_WORK_Q_STACK_SIZE (512U)
122
123 /** Workqueue priority for bounding processing. */
124 #define EP_BOUND_WORK_Q_PRIORITY (CONFIG_SYSTEM_WORKQUEUE_PRIORITY)
125
126 enum msg_type {
127 MSG_DATA = 0, /* Data message. */
128 MSG_RELEASE_DATA, /* Release data buffer message. */
129 MSG_BOUND, /* Endpoint bounding message. */
130 MSG_RELEASE_BOUND, /* Release endpoint bound message.
131 * This message is also an indicator for the receiving side
132 * that the endpoint bounding was fully processed on
133 * the sender side.
134 */
135 };
136
137 enum ept_bounding_state {
138 EPT_UNCONFIGURED = 0, /* Endpoint is not configured (initial state). */
139 EPT_CONFIGURED, /* Endpoint is configured, waiting for work queue to
140 * start bounding process.
141 */
142 EPT_BOUNDING, /* Only on initiator. Bound message was sent,
143 * but bound callback was not called yet, because
144 * we are waiting for any incoming messages.
145 */
146 EPT_READY, /* Bounding is done. Bound callback was called. */
147 };
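
/*
 * Typical endpoint state flow (derived from the code below):
 *
 *   EPT_UNCONFIGURED -> EPT_CONFIGURED  when the endpoint is registered,
 *   EPT_CONFIGURED   -> EPT_BOUNDING    initiator only, when the bound message is sent,
 *   EPT_BOUNDING     -> EPT_READY       initiator, on the first message from the remote,
 *   EPT_CONFIGURED   -> EPT_READY       follower, when an incoming bound message matches.
 */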
148
149 enum ept_rebound_state {
150 EPT_NORMAL = 0, /* No endpoint rebounding is needed. */
151 EPT_DEREGISTERED, /* Endpoint was deregistered. */
152 EPT_REBOUNDING, /* Rebounding was requested, waiting for work queue to
153 * start rebounding process.
154 */
155 };
156
157 struct channel_config {
158 uint8_t *blocks_ptr; /* Address where the blocks start. */
159 size_t block_size; /* Size of one block. */
160 size_t block_count; /* Number of blocks. */
161 };
162
163 struct icbmsg_config {
164 struct icmsg_config_t control_config; /* Configuration of the ICMsg. */
165 struct channel_config rx; /* RX channel config. */
166 struct channel_config tx; /* TX channel config. */
167 sys_bitarray_t *tx_usage_bitmap; /* Bit is set when TX block is in use */
168 sys_bitarray_t *rx_hold_bitmap; /* Bit is set if the buffer starting at
169 * this block should be kept after exiting
170 * the receive handler.
171 */
172 };
173
174 struct ept_data {
175 const struct ipc_ept_cfg *cfg; /* Endpoint configuration. */
176 atomic_t state; /* Bounding state. */
177 atomic_t rebound_state; /* Rebounding state. */
178 uint8_t addr; /* Endpoint address. */
179 };
180
181 struct backend_data {
182 const struct icbmsg_config *conf;/* Backend instance config. */
183 struct icmsg_data_t control_data;/* ICMsg data. */
184 #ifdef CONFIG_MULTITHREADING
185 struct k_mutex mutex; /* Mutex to protect: ICMsg send call and
186 * waiting_bound field.
187 */
188 struct k_work ep_bound_work; /* Work item for bounding processing. */
189 struct k_sem block_wait_sem; /* Semaphore for waiting for free blocks. */
190 #endif
191 struct ept_data ept[NUM_EPT]; /* Array of registered endpoints. */
192 uint8_t ept_map[NUM_EPT]; /* Array that maps endpoint address to index. */
193 uint16_t waiting_bound[NUM_EPT];/* The bound messages waiting to be registered. */
194 atomic_t flags; /* Flags on higher bits, number of registered
195 * endpoints on lower.
196 */
197 bool is_initiator; /* This side has an initiator role. */
198 };
199
200 struct block_header {
201 volatile size_t size; /* Size of the data field. It must be volatile, because
202 * when this value is read and validated for security
203 * reasons, the compiler must not generate code that
204 * reads it again after validation.
205 */
206 };
207
208 struct block_content {
209 struct block_header header;
210 uint8_t data[]; /* Buffer data. */
211 };
212
213 struct control_message {
214 uint8_t msg_type; /* Message type. */
215 uint8_t ept_addr; /* Endpoint address or zero for MSG_RELEASE_DATA. */
216 uint8_t block_index; /* Block index to send or release. */
217 };
218
219 BUILD_ASSERT(NUM_EPT <= EPT_ADDR_INVALID, "Too many endpoints");
220
221 #ifdef CONFIG_MULTITHREADING
222 /* Work queue for bounding processing. */
223 static struct k_work_q ep_bound_work_q;
224 #endif
225
226 /**
227 * Calculate pointer to block from its index and channel configuration (RX or TX).
228 * No validation is performed.
229 */
230 static struct block_content *block_from_index(const struct channel_config *ch_conf,
231 size_t block_index)
232 {
233 return (struct block_content *)(ch_conf->blocks_ptr +
234 block_index * ch_conf->block_size);
235 }
236
237 /**
238 * Calculate pointer to data buffer from block index and channel configuration (RX or TX).
239 * Also validate the index and optionally the buffer size allocated on this block.
240 *
241 * @param[in] ch_conf The channel
242 * @param[in] block_index Block index
243 * @param[out] size Size of the buffer allocated on the block if not NULL.
244 * The size is also checked if it fits in the blocks area.
245 * If it is NULL, no size validation is performed.
246 * @param[in] invalidate_cache If size is not NULL, invalidates cache for entire buffer
247 * (all blocks). Otherwise, it is ignored.
248 * @return Pointer to data buffer or NULL if validation failed.
249 */
250 static uint8_t *buffer_from_index_validate(const struct channel_config *ch_conf,
251 size_t block_index, size_t *size,
252 bool invalidate_cache)
253 {
254 size_t allocable_size;
255 size_t buffer_size;
256 uint8_t *end_ptr;
257 struct block_content *block;
258
259 if (block_index >= ch_conf->block_count) {
260 LOG_ERR("Block index invalid");
261 return NULL;
262 }
263
264 block = block_from_index(ch_conf, block_index);
265
266 if (size != NULL) {
267 if (invalidate_cache) {
268 sys_cache_data_invd_range(block, BLOCK_HEADER_SIZE);
269 __sync_synchronize();
270 }
271 allocable_size = ch_conf->block_count * ch_conf->block_size;
272 end_ptr = ch_conf->blocks_ptr + allocable_size;
273 buffer_size = block->header.size;
274
275 if ((buffer_size > allocable_size - BLOCK_HEADER_SIZE) ||
276 (&block->data[buffer_size] > end_ptr)) {
277 LOG_ERR("Block corrupted");
278 return NULL;
279 }
280
281 *size = buffer_size;
282 if (invalidate_cache) {
283 sys_cache_data_invd_range(block->data, buffer_size);
284 __sync_synchronize();
285 }
286 }
287
288 return block->data;
289 }
290
291 /**
292 * Calculate block index based on data buffer pointer and validate it.
293 *
294 * @param[in] ch_conf The channel
295 * @param[in] buffer Pointer to data buffer
296 * @param[out] size Size of the allocated buffer if not NULL.
297 * The size is also checked if it fits in the blocks area.
298 * If it is NULL, no size validation is performed.
299 * @return Block index or negative error code
300 * @retval -EINVAL The buffer is not correct
301 */
302 static int buffer_to_index_validate(const struct channel_config *ch_conf,
303 const uint8_t *buffer, size_t *size)
304 {
305 size_t block_index;
306 uint8_t *expected;
307
308 block_index = (buffer - ch_conf->blocks_ptr) / ch_conf->block_size;
309
310 expected = buffer_from_index_validate(ch_conf, block_index, size, false);
311
312 if (expected == NULL || expected != buffer) {
313 LOG_ERR("Pointer invalid");
314 return -EINVAL;
315 }
316
317 return block_index;
318 }
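
/*
 * For example (illustrative numbers): with a 64-byte block size, a data pointer equal
 * to blocks_ptr + 2 * 64 + BLOCK_HEADER_SIZE maps back to block index 2, because the
 * integer division discards the header offset within the block. A pointer that does
 * not point exactly at a block's data field fails the comparison against the expected
 * pointer and is rejected with -EINVAL.
 */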
319
320 /**
321 * Allocate buffer for transmission
322 *
323 * @param[in,out] size Required size of the buffer. If set to zero, the first available block will
324 * be allocated, together with all contiguous free blocks that follow it.
325 * On success, size will contain the actually allocated size, which will be
326 * at least the requested size.
327 * @param[out] buffer Pointer to the newly allocated buffer.
328 * @param[in] timeout Timeout.
329 *
330 * @return Positive index of the first allocated block or negative error.
331 * @retval -ENOMEM If requested size is bigger than entire allocable space, or
332 * the timeout was K_NO_WAIT and there was not enough space.
333 * @retval -EAGAIN If timeout occurred.
334 */
335 static int alloc_tx_buffer(struct backend_data *dev_data, uint32_t *size,
336 uint8_t **buffer, k_timeout_t timeout)
337 {
338 const struct icbmsg_config *conf = dev_data->conf;
339 size_t total_size = *size + BLOCK_HEADER_SIZE;
340 size_t num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
341 struct block_content *block;
342 #ifdef CONFIG_MULTITHREADING
343 bool sem_taken = false;
344 #endif
345 size_t tx_block_index;
346 size_t next_bit;
347 int prev_bit_val;
348 int r;
349
350 #ifdef CONFIG_MULTITHREADING
351 do {
352 /* Try to allocate specified number of blocks. */
353 r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks,
354 &tx_block_index);
355 if (r == -ENOSPC && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
356 /* Wait for a release if there is not enough space and exit the
357 * loop on timeout.
358 */
359 r = k_sem_take(&dev_data->block_wait_sem, timeout);
360 if (r < 0) {
361 break;
362 }
363 sem_taken = true;
364 } else {
365 /* Exit loop if space was allocated or other error occurred. */
366 break;
367 }
368 } while (true);
369
370 /* If the semaphore was taken, give it back, because this thread did not
371 * necessarily take all available space, so another thread may need it.
372 */
373 if (sem_taken) {
374 k_sem_give(&dev_data->block_wait_sem);
375 }
376 #else
377 /* Try to allocate specified number of blocks. */
378 r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, &tx_block_index);
379 #endif
380
381 if (r < 0) {
382 if (r != -ENOSPC && r != -EAGAIN) {
383 LOG_ERR("Failed to allocate buffer, err: %d", r);
384 /* Only -EINVAL is allowed in this place. Any other code
385 * indicates something wrong with the logic.
386 */
387 __ASSERT_NO_MSG(r == -EINVAL);
388 }
389
390 if (r == -ENOSPC || r == -EINVAL) {
391 /* The IPC service requires the -ENOMEM error in case of no memory. */
392 r = -ENOMEM;
393 }
394 return r;
395 }
396
397 /* If size is 0, try to allocate more blocks after the already allocated ones. */
398 if (*size == 0) {
399 prev_bit_val = 0;
400 for (next_bit = tx_block_index + 1; next_bit < conf->tx.block_count;
401 next_bit++) {
402 r = sys_bitarray_test_and_set_bit(conf->tx_usage_bitmap, next_bit,
403 &prev_bit_val);
404 /* Setting the bit should always succeed. */
405 __ASSERT_NO_MSG(r == 0);
406 if (prev_bit_val) {
407 break;
408 }
409 }
410 num_blocks = next_bit - tx_block_index;
411 }
412
413 /* Get block pointer and adjust size to actually allocated space. */
414 *size = conf->tx.block_size * num_blocks - BLOCK_HEADER_SIZE;
415 block = block_from_index(&conf->tx, tx_block_index);
416 block->header.size = *size;
417 *buffer = block->data;
418 return tx_block_index;
419 }
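
/*
 * For example, a minimal sketch of both allocation modes (illustrative numbers,
 * assuming 32-byte blocks and a 4-byte block header):
 *
 *     uint32_t size = 100;
 *     uint8_t *buf;
 *     int idx = alloc_tx_buffer(dev_data, &size, &buf, K_MSEC(10));
 *     // On success, "idx" is the first block index and "size" was updated to the
 *     // actually allocated space: 4 blocks of 32 bytes minus the header, i.e. 124.
 *
 *     uint32_t whole = 0;
 *     idx = alloc_tx_buffer(dev_data, &whole, &buf, K_NO_WAIT);
 *     // With a requested size of 0, the first free block and all contiguous free
 *     // blocks after it are taken, and "whole" reports the resulting buffer size.
 */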
420
421 /**
422 * Release all or part of the blocks occupied by the buffer.
423 *
424 * @param[in] tx_block_index First block index to release, no validation is performed,
425 * so caller is responsible for passing valid index.
426 * @param[in] size Size of data buffer, no validation is performed,
427 * so caller is responsible for passing valid size.
428 * @param[in] new_size If less than zero, release all blocks, otherwise reduce
429 * size to this value and update size in block header.
430 *
431 * @returns Positive block index where the buffer starts or negative error.
432 * @retval -EINVAL If invalid buffer was provided or size is greater than already
433 * allocated size.
434 */
435 static int release_tx_blocks(struct backend_data *dev_data, size_t tx_block_index,
436 size_t size, int new_size)
437 {
438 const struct icbmsg_config *conf = dev_data->conf;
439 struct block_content *block;
440 size_t num_blocks;
441 size_t total_size;
442 size_t new_total_size;
443 size_t new_num_blocks;
444 size_t release_index;
445 int r;
446
447 /* Calculate number of blocks. */
448 total_size = size + BLOCK_HEADER_SIZE;
449 num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
450
451 if (new_size >= 0) {
452 /* Calculate and validate new values. */
453 new_total_size = new_size + BLOCK_HEADER_SIZE;
454 new_num_blocks = DIV_ROUND_UP(new_total_size, conf->tx.block_size);
455 if (new_num_blocks > num_blocks) {
456 LOG_ERR("Requested %d blocks, allocated %d", new_num_blocks,
457 num_blocks);
458 return -EINVAL;
459 }
460 /* Update actual buffer size and number of blocks to release. */
461 block = block_from_index(&conf->tx, tx_block_index);
462 block->header.size = new_size;
463 release_index = tx_block_index + new_num_blocks;
464 num_blocks = num_blocks - new_num_blocks;
465 } else {
466 /* If size is negative, release all blocks. */
467 release_index = tx_block_index;
468 }
469
470 if (num_blocks > 0) {
471 /* Free bits in the bitmap. */
472 r = sys_bitarray_free(conf->tx_usage_bitmap, num_blocks,
473 release_index);
474 if (r < 0) {
475 LOG_ERR("Cannot free bits, err %d", r);
476 return r;
477 }
478
479 #ifdef CONFIG_MULTITHREADING
480 /* Wake up all waiting threads. */
481 k_sem_give(&dev_data->block_wait_sem);
482 #endif
483 }
484
485 return tx_block_index;
486 }
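
/*
 * For example (illustrative numbers, assuming 32-byte blocks and a 4-byte header):
 * a buffer allocated with size 124 occupies 4 blocks. Calling
 * release_tx_blocks(dev_data, idx, 124, 40) keeps DIV_ROUND_UP(40 + 4, 32) = 2 blocks,
 * updates the size in the block header to 40 and frees the remaining 2 blocks, while
 * a negative new_size (as used by release_tx_buffer()) frees all 4 blocks.
 */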
487
488 /**
489 * Release all or part of the blocks occupied by the buffer.
490 *
491 * @param[in] buffer Buffer to release.
492 * @param[in] new_size If less than zero, release all blocks, otherwise reduce size to
493 * this value and update size in block header.
494 *
495 * @returns Positive block index where the buffer starts or negative error.
496 * @retval -EINVAL If invalid buffer was provided or size is greater than already
497 * allocated size.
498 */
499 static int release_tx_buffer(struct backend_data *dev_data, const uint8_t *buffer,
500 int new_size)
501 {
502 const struct icbmsg_config *conf = dev_data->conf;
503 size_t size = 0;
504 int tx_block_index;
505
506 tx_block_index = buffer_to_index_validate(&conf->tx, buffer, &size);
507 if (tx_block_index < 0) {
508 return tx_block_index;
509 }
510
511 return release_tx_blocks(dev_data, tx_block_index, size, new_size);
512 }
513
514 /**
515 * Send a control message over ICMsg with the mutex locked. The mutex must be locked,
516 * because ICMsg may return an error on concurrent invocations even when there is
517 * enough space in the queue.
518 */
519 static int send_control_message(struct backend_data *dev_data, enum msg_type msg_type,
520 uint8_t ept_addr, uint8_t block_index)
521 {
522 const struct icbmsg_config *conf = dev_data->conf;
523 const struct control_message message = {
524 .msg_type = (uint8_t)msg_type,
525 .ept_addr = ept_addr,
526 .block_index = block_index,
527 };
528 int r;
529
530 #ifdef CONFIG_MULTITHREADING
531 k_mutex_lock(&dev_data->mutex, K_FOREVER);
532 #endif
533 r = icmsg_send(&conf->control_config, &dev_data->control_data, &message,
534 sizeof(message));
535 #ifdef CONFIG_MULTITHREADING
536 k_mutex_unlock(&dev_data->mutex);
537 #endif
538 if (r < sizeof(message)) {
539 LOG_ERR("Cannot send over ICMsg, err %d", r);
540 }
541 return r;
542 }
543
544 /**
545 * Release a received buffer. This function only sends a release control message.
546 *
547 * @param[in] buffer Buffer to release.
548 * @param[in] msg_type Message type: MSG_RELEASE_BOUND or MSG_RELEASE_DATA.
549 * @param[in] ept_addr Endpoint address or zero for MSG_RELEASE_DATA.
550 *
551 * @return zero or ICMsg send error.
552 */
553 static int send_release(struct backend_data *dev_data, const uint8_t *buffer,
554 enum msg_type msg_type, uint8_t ept_addr)
555 {
556 const struct icbmsg_config *conf = dev_data->conf;
557 int rx_block_index;
558
559 rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL);
560 if (rx_block_index < 0) {
561 return rx_block_index;
562 }
563
564 return send_control_message(dev_data, msg_type, ept_addr, rx_block_index);
565 }
566
567 /**
568 * Send data contained in the specified block. It will adjust the data size and flush
569 * the cache if necessary. If sending fails, the allocated blocks are released.
570 *
571 * @param[in] msg_type Message type: MSG_BOUND or MSG_DATA.
572 * @param[in] ept_addr Endpoint address.
573 * @param[in] tx_block_index Index of first block containing data, it is not validated,
574 * so caller is responsible for passing only valid index.
575 * @param[in] size Actual size of the data. It can be smaller than the allocated size,
576 * but it cannot change the number of required blocks.
577 *
578 * @return number of bytes sent in the message or negative error code.
579 */
580 static int send_block(struct backend_data *dev_data, enum msg_type msg_type,
581 uint8_t ept_addr, size_t tx_block_index, size_t size)
582 {
583 struct block_content *block;
584 int r;
585
586 block = block_from_index(&dev_data->conf->tx, tx_block_index);
587
588 block->header.size = size;
589 __sync_synchronize();
590 sys_cache_data_flush_range(block, size + BLOCK_HEADER_SIZE);
591
592 r = send_control_message(dev_data, msg_type, ept_addr, tx_block_index);
593 if (r < 0) {
594 release_tx_blocks(dev_data, tx_block_index, size, -1);
595 }
596
597 return r;
598 }
599
600 /**
601 * Find the endpoint that was registered with a name matching the name
602 * contained in the endpoint bound message received from the remote side.
603 *
604 * @param[in] name Endpoint name, it must be in a received block.
605 *
606 * @return Found endpoint index or -ENOENT if not found.
607 */
608 static int find_ept_by_name(struct backend_data *dev_data, const char *name)
609 {
610 const struct channel_config *rx_conf = &dev_data->conf->rx;
611 const char *buffer_end = (const char *)rx_conf->blocks_ptr +
612 rx_conf->block_count * rx_conf->block_size;
613 struct ept_data *ept;
614 size_t name_size;
615 size_t i;
616
617 /* Requested name is in shared memory, so we have to assume that it
618 * can be corrupted. Extra care must be taken to avoid out of
619 * bounds reads.
620 */
621 name_size = strnlen(name, buffer_end - name - 1) + 1;
622
623 for (i = 0; i < NUM_EPT; i++) {
624 ept = &dev_data->ept[i];
625 if (atomic_get(&ept->state) == EPT_CONFIGURED &&
626 strncmp(ept->cfg->name, name, name_size) == 0) {
627 return i;
628 }
629 }
630
631 return -ENOENT;
632 }
633
634 /**
635 * Find registered endpoint that matches given "bound endpoint" message. When found,
636 * the "release bound endpoint" message is sent.
637 *
638 * @param[in] rx_block_index Block containing the "bound endpoint" message.
639 * @param[in] ept_addr Endpoint address.
640 *
641 * @return negative error code or non-negative search result.
642 * @retval 0 match not found.
643 * @retval 1 match found and processing was successful.
644 */
645 static int match_bound_msg(struct backend_data *dev_data, size_t rx_block_index,
646 uint8_t ept_addr)
647 {
648 const struct icbmsg_config *conf = dev_data->conf;
649 struct block_content *block;
650 uint8_t *buffer;
651 int ept_index;
652 struct ept_data *ept;
653 int r;
654 bool valid_state;
655
656 /* Find endpoint that matches requested name. */
657 block = block_from_index(&conf->rx, rx_block_index);
658 buffer = block->data;
659 ept_index = find_ept_by_name(dev_data, buffer);
660 if (ept_index < 0) {
661 return 0;
662 }
663
664 /* Set endpoint address and mapping. Move it to "ready" state. */
665 ept = &dev_data->ept[ept_index];
666 ept->addr = ept_addr;
667 dev_data->ept_map[ept->addr] = ept_index;
668 valid_state = atomic_cas(&ept->state, EPT_CONFIGURED, EPT_READY);
669 if (!valid_state) {
670 LOG_ERR("Unexpected bounding from remote on endpoint %d", ept_addr);
671 return -EINVAL;
672 }
673
674 /* Endpoint is ready to send messages, so call bound callback. */
675 if (ept->cfg->cb.bound != NULL) {
676 ept->cfg->cb.bound(ept->cfg->priv);
677 }
678
679 /* Release the bound message and inform remote that we are ready to receive. */
680 r = send_release(dev_data, buffer, MSG_RELEASE_BOUND, ept_addr);
681 if (r < 0) {
682 return r;
683 }
684
685 return 1;
686 }
687
688 /**
689 * Send bound message on specified endpoint.
690 *
691 * @param[in] ept Endpoint to use.
692 *
693 * @return non-negative value in case of success or negative error code.
694 */
695 static int send_bound_message(struct backend_data *dev_data, struct ept_data *ept)
696 {
697 size_t msg_len;
698 uint32_t alloc_size;
699 uint8_t *buffer;
700 int r;
701
702 msg_len = strlen(ept->cfg->name) + 1;
703 alloc_size = msg_len;
704 r = alloc_tx_buffer(dev_data, &alloc_size, &buffer, K_FOREVER);
705 if (r >= 0) {
706 strcpy(buffer, ept->cfg->name);
707 r = send_block(dev_data, MSG_BOUND, ept->addr, r, msg_len);
708 }
709
710 return r;
711 }
712
713 #ifdef CONFIG_MULTITHREADING
714 /**
715 * Put endpoint bound processing into the dedicated bound-processing workqueue.
716 */
717 static void schedule_ept_bound_process(struct backend_data *dev_data)
718 {
719 k_work_submit_to_queue(&ep_bound_work_q, &dev_data->ep_bound_work);
720 }
721 #endif
722
723 /**
724 * Work handler responsible for starting endpoint bounding when ICMsg is bound.
725 */
726 #ifdef CONFIG_MULTITHREADING
727 static void ept_bound_process(struct k_work *item)
728 #else
729 static void ept_bound_process(struct backend_data *dev_data)
730 #endif
731 {
732 #ifdef CONFIG_MULTITHREADING
733 struct backend_data *dev_data = CONTAINER_OF(item, struct backend_data,
734 ep_bound_work);
735 #endif
736 struct ept_data *ept = NULL;
737 size_t i;
738 int r = 0;
739 bool matching_state;
740
741 /* Skip processing if ICMsg was not bound yet. */
742 if (!(atomic_get(&dev_data->flags) & CONTROL_BOUNDED)) {
743 return;
744 }
745
746 if (dev_data->is_initiator) {
747 /* The initiator just sends the bound message after the endpoint was registered. */
748 for (i = 0; i < NUM_EPT; i++) {
749 ept = &dev_data->ept[i];
750 matching_state = atomic_cas(&ept->state, EPT_CONFIGURED,
751 EPT_BOUNDING);
752 if (matching_state) {
753 r = send_bound_message(dev_data, ept);
754 if (r < 0) {
755 atomic_set(&ept->state, EPT_UNCONFIGURED);
756 LOG_ERR("Failed to send bound, err %d", r);
757 }
758 }
759 }
760 } else {
761 /* Walk over all waiting bound messages and match to local endpoints. */
762 #ifdef CONFIG_MULTITHREADING
763 k_mutex_lock(&dev_data->mutex, K_FOREVER);
764 #endif
765 for (i = 0; i < NUM_EPT; i++) {
766 if (dev_data->waiting_bound[i] != WAITING_BOUND_MSG_EMPTY) {
767 #ifdef CONFIG_MULTITHREADING
768 k_mutex_unlock(&dev_data->mutex);
769 #endif
770 r = match_bound_msg(dev_data,
771 dev_data->waiting_bound[i], i);
772 #ifdef CONFIG_MULTITHREADING
773 k_mutex_lock(&dev_data->mutex, K_FOREVER);
774 #endif
775 if (r != 0) {
776 dev_data->waiting_bound[i] =
777 WAITING_BOUND_MSG_EMPTY;
778 if (r < 0) {
779 LOG_ERR("Failed bound, err %d", r);
780 }
781 }
782 }
783 }
784 #ifdef CONFIG_MULTITHREADING
785 k_mutex_unlock(&dev_data->mutex);
786 #endif
787 }
788
789 /* Check if any endpoint is ready to rebound and call the callback if it is. */
790 for (i = 0; i < NUM_EPT; i++) {
791 ept = &dev_data->ept[i];
792 matching_state = atomic_cas(&ept->rebound_state, EPT_REBOUNDING,
793 EPT_NORMAL);
794 if (matching_state) {
795 if (ept->cfg->cb.bound != NULL) {
796 ept->cfg->cb.bound(ept->cfg->priv);
797 }
798 }
799 }
800 }
801
802 /**
803 * Get the endpoint from its address. Also validates that the address is correct and
804 * that the endpoint is in a correct state for receiving. If the bound callback was not
805 * called yet, call it.
806 */
807 static struct ept_data *get_ept_and_rx_validate(struct backend_data *dev_data,
808 uint8_t ept_addr)
809 {
810 struct ept_data *ept;
811 enum ept_bounding_state state;
812
813 if (ept_addr >= NUM_EPT || dev_data->ept_map[ept_addr] >= NUM_EPT) {
814 LOG_ERR("Received invalid endpoint addr %d", ept_addr);
815 return NULL;
816 }
817
818 ept = &dev_data->ept[dev_data->ept_map[ept_addr]];
819
820 state = atomic_get(&ept->state);
821
822 if (state == EPT_READY) {
823 /* Ready state, ensure that it is not deregistered nor rebounding. */
824 if (atomic_get(&ept->rebound_state) != EPT_NORMAL) {
825 return NULL;
826 }
827 } else if (state == EPT_BOUNDING) {
828 /* Endpoint bound callback was not called yet - call it. */
829 atomic_set(&ept->state, EPT_READY);
830 if (ept->cfg->cb.bound != NULL) {
831 ept->cfg->cb.bound(ept->cfg->priv);
832 }
833 } else {
834 LOG_ERR("Invalid state %d of receiving endpoint %d", state, ept->addr);
835 return NULL;
836 }
837
838 return ept;
839 }
840
841 /**
842 * Data message received.
843 */
844 static int received_data(struct backend_data *dev_data, size_t rx_block_index,
845 uint8_t ept_addr)
846 {
847 const struct icbmsg_config *conf = dev_data->conf;
848 uint8_t *buffer;
849 struct ept_data *ept;
850 size_t size;
851 int bit_val;
852
853 /* Validate. */
854 buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true);
855 ept = get_ept_and_rx_validate(dev_data, ept_addr);
856 if (buffer == NULL || ept == NULL) {
857 LOG_ERR("Received invalid block index %d or addr %d", rx_block_index,
858 ept_addr);
859 return -EINVAL;
860 }
861
862 /* Clear the bit. If it stays cleared, the block will not be held after the callback. */
863 sys_bitarray_clear_bit(conf->rx_hold_bitmap, rx_block_index);
864
865 /* Call the endpoint callback. It can set the hold bit. */
866 ept->cfg->cb.received(buffer, size, ept->cfg->priv);
867
868 /* If the bit is still cleared, request release of the buffer. */
869 sys_bitarray_test_bit(conf->rx_hold_bitmap, rx_block_index, &bit_val);
870 if (!bit_val) {
871 send_release(dev_data, buffer, MSG_RELEASE_DATA, 0);
872 }
873
874 return 0;
875 }
876
877 /**
878 * Release data message received.
879 */
880 static int received_release_data(struct backend_data *dev_data, size_t tx_block_index)
881 {
882 const struct icbmsg_config *conf = dev_data->conf;
883 uint8_t *buffer;
884 size_t size;
885 int r;
886
887 /* Validate. */
888 buffer = buffer_from_index_validate(&conf->tx, tx_block_index, &size, false);
889 if (buffer == NULL) {
890 LOG_ERR("Received invalid block index %d", tx_block_index);
891 return -EINVAL;
892 }
893
894 /* Release. */
895 r = release_tx_blocks(dev_data, tx_block_index, size, -1);
896 if (r < 0) {
897 return r;
898 }
899
900 return r;
901 }
902
903 /**
904 * Bound endpoint message received.
905 */
906 static int received_bound(struct backend_data *dev_data, size_t rx_block_index,
907 uint8_t ept_addr)
908 {
909 const struct icbmsg_config *conf = dev_data->conf;
910 size_t size;
911 uint8_t *buffer;
912
913 /* Validate */
914 buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true);
915 if (buffer == NULL) {
916 LOG_ERR("Received invalid block index %d", rx_block_index);
917 return -EINVAL;
918 }
919
920 /* Put message to waiting array. */
921 #ifdef CONFIG_MULTITHREADING
922 k_mutex_lock(&dev_data->mutex, K_FOREVER);
923 #endif
924 dev_data->waiting_bound[ept_addr] = rx_block_index;
925 #ifdef CONFIG_MULTITHREADING
926 k_mutex_unlock(&dev_data->mutex);
927 #endif
928
929 #ifdef CONFIG_MULTITHREADING
930 /* Schedule processing the message. */
931 schedule_ept_bound_process(dev_data);
932 #else
933 ept_bound_process(dev_data);
934 #endif
935
936 return 0;
937 }
938
939 /**
940 * Callback called by ICMsg that handles message (data or endpoint bound) received
941 * from the remote.
942 *
943 * @param[in] data Message received from the ICMsg.
944 * @param[in] len Number of bytes of data.
945 * @param[in] priv Opaque pointer to device instance.
946 */
947 static void control_received(const void *data, size_t len, void *priv)
948 {
949 const struct device *instance = priv;
950 struct backend_data *dev_data = instance->data;
951 const struct control_message *message = (const struct control_message *)data;
952 struct ept_data *ept;
953 uint8_t ept_addr;
954 int r = 0;
955
956 /* Allow messages longer than 3 bytes, e.g. for future protocol versions. */
957 if (len < sizeof(struct control_message)) {
958 r = -EINVAL;
959 goto exit;
960 }
961
962 ept_addr = message->ept_addr;
963 if (ept_addr >= NUM_EPT) {
964 r = -EINVAL;
965 goto exit;
966 }
967
968 switch (message->msg_type) {
969 case MSG_RELEASE_DATA:
970 r = received_release_data(dev_data, message->block_index);
971 break;
972 case MSG_RELEASE_BOUND:
973 r = received_release_data(dev_data, message->block_index);
974 if (r >= 0) {
975 ept = get_ept_and_rx_validate(dev_data, ept_addr);
976 if (ept == NULL) {
977 r = -EINVAL;
978 }
979 }
980 break;
981 case MSG_BOUND:
982 r = received_bound(dev_data, message->block_index, ept_addr);
983 break;
984 case MSG_DATA:
985 r = received_data(dev_data, message->block_index, ept_addr);
986 break;
987 default:
988 /* Silently ignore other message types. They can be used in a future
989 * protocol version.
990 */
991 break;
992 }
993
994 exit:
995 if (r < 0) {
996 LOG_ERR("Failed to receive, err %d", r);
997 }
998 }
999
1000 /**
1001 * Callback called when ICMsg is bound.
1002 */
1003 static void control_bound(void *priv)
1004 {
1005 const struct device *instance = priv;
1006 struct backend_data *dev_data = instance->data;
1007
1008 /* Set the flag that ICMsg is bound; endpoint bounding may now start. */
1009 atomic_or(&dev_data->flags, CONTROL_BOUNDED);
1010 #ifdef CONFIG_MULTITHREADING
1011 schedule_ept_bound_process(dev_data);
1012 #else
1013 ept_bound_process(dev_data);
1014 #endif
1015 }
1016
1017 /**
1018 * Open the backend instance callback.
1019 */
1020 static int open(const struct device *instance)
1021 {
1022 const struct icbmsg_config *conf = instance->config;
1023 struct backend_data *dev_data = instance->data;
1024
1025 static const struct ipc_service_cb cb = {
1026 .bound = control_bound,
1027 .received = control_received,
1028 .error = NULL,
1029 };
1030
1031 LOG_DBG("Open instance 0x%08X, initiator=%d", (uint32_t)instance,
1032 dev_data->is_initiator ? 1 : 0);
1033 LOG_DBG(" TX %d blocks of %d bytes at 0x%08X, max allocable %d bytes",
1034 (uint32_t)conf->tx.block_count,
1035 (uint32_t)conf->tx.block_size,
1036 (uint32_t)conf->tx.blocks_ptr,
1037 (uint32_t)(conf->tx.block_size * conf->tx.block_count -
1038 BLOCK_HEADER_SIZE));
1039 LOG_DBG(" RX %d blocks of %d bytes at 0x%08X, max allocable %d bytes",
1040 (uint32_t)conf->rx.block_count,
1041 (uint32_t)conf->rx.block_size,
1042 (uint32_t)conf->rx.blocks_ptr,
1043 (uint32_t)(conf->rx.block_size * conf->rx.block_count -
1044 BLOCK_HEADER_SIZE));
1045
1046 return icmsg_open(&conf->control_config, &dev_data->control_data, &cb,
1047 (void *)instance);
1048 }
1049
1050 /**
1051 * Endpoint send callback function (with copy).
1052 */
1053 static int send(const struct device *instance, void *token, const void *msg, size_t len)
1054 {
1055 struct backend_data *dev_data = instance->data;
1056 struct ept_data *ept = token;
1057 uint32_t alloc_size;
1058 uint8_t *buffer;
1059 int r;
1060
1061 /* Allocate the buffer. */
1062 alloc_size = len;
1063 r = alloc_tx_buffer(dev_data, &alloc_size, &buffer, K_NO_WAIT);
1064 if (r < 0) {
1065 return r;
1066 }
1067
1068 /* Copy data to allocated buffer. */
1069 memcpy(buffer, msg, len);
1070
1071 /* Send data message. */
1072 r = send_block(dev_data, MSG_DATA, ept->addr, r, len);
1073 if (r < 0) {
1074 return r;
1075 }
1076
1077 return len;
1078 }
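
/*
 * Example application-side usage of this backend through the generic IPC service API
 * (a sketch only; it assumes the standard ipc_service_*() calls and an instance with
 * the devicetree node label "ipc0"):
 *
 *     static void ep_bound(void *priv)
 *     {
 *         // Endpoint is bound, sending is allowed from now on.
 *     }
 *
 *     static void ep_received(const void *data, size_t len, void *priv)
 *     {
 *         // Handle the received buffer. It is valid only during this callback,
 *         // unless it is held with ipc_service_hold_rx_buffer().
 *     }
 *
 *     static const struct ipc_ept_cfg ep_cfg = {
 *         .name = "ep0",
 *         .cb = { .bound = ep_bound, .received = ep_received },
 *     };
 *
 *     const struct device *ipc = DEVICE_DT_GET(DT_NODELABEL(ipc0));
 *     static struct ipc_ept ep;
 *
 *     ipc_service_open_instance(ipc);
 *     ipc_service_register_endpoint(ipc, &ep, &ep_cfg);
 *     // ...after the bound callback was called:
 *     ipc_service_send(&ep, "hello", 6);
 */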
1079
1080 /**
1081 * Backend endpoint registration callback.
1082 */
1083 static int register_ept(const struct device *instance, void **token,
1084 const struct ipc_ept_cfg *cfg)
1085 {
1086 struct backend_data *dev_data = instance->data;
1087 struct ept_data *ept = NULL;
1088 bool matching_state;
1089 int ept_index;
1090 int r = 0;
1091
1092 /* Try to find endpoint to rebound */
1093 for (ept_index = 0; ept_index < NUM_EPT; ept_index++) {
1094 ept = &dev_data->ept[ept_index];
1095 if (ept->cfg == cfg) {
1096 matching_state = atomic_cas(&ept->rebound_state, EPT_DEREGISTERED,
1097 EPT_REBOUNDING);
1098 if (!matching_state) {
1099 return -EINVAL;
1100 }
1101 #ifdef CONFIG_MULTITHREADING
1102 schedule_ept_bound_process(dev_data);
1103 #else
1104 ept_bound_process(dev_data);
1105 #endif
1106 return 0;
1107 }
1108 }
1109
1110 /* Reserve new endpoint index. */
1111 ept_index = atomic_inc(&dev_data->flags) & FLAG_EPT_COUNT_MASK;
1112 if (ept_index >= NUM_EPT) {
1113 LOG_ERR("Too many endpoints");
1114 __ASSERT_NO_MSG(false);
1115 return -ENOMEM;
1116 }
1117
1118 /* Add new endpoint. */
1119 ept = &dev_data->ept[ept_index];
1120 ept->cfg = cfg;
1121 if (dev_data->is_initiator) {
1122 ept->addr = ept_index;
1123 dev_data->ept_map[ept->addr] = ept->addr;
1124 }
1125 atomic_set(&ept->state, EPT_CONFIGURED);
1126
1127 /* Keep endpoint address in token. */
1128 *token = ept;
1129
1130 #ifdef CONFIG_MULTITHREADING
1131 /* The rest of the bounding will be done in the endpoint bound workqueue. */
1132 schedule_ept_bound_process(dev_data);
1133 #else
1134 ept_bound_process(dev_data);
1135 #endif
1136
1137 return r;
1138 }
1139
1140 /**
1141 * Backend endpoint deregistration callback.
1142 */
1143 static int deregister_ept(const struct device *instance, void *token)
1144 {
1145 struct ept_data *ept = token;
1146 bool matching_state;
1147
1148 matching_state = atomic_cas(&ept->rebound_state, EPT_NORMAL, EPT_DEREGISTERED);
1149
1150 if (!matching_state) {
1151 return -EINVAL;
1152 }
1153
1154 return 0;
1155 }
1156
1157 /**
1158 * Returns maximum TX buffer size.
1159 */
1160 static int get_tx_buffer_size(const struct device *instance, void *token)
1161 {
1162 const struct icbmsg_config *conf = instance->config;
1163
1164 return conf->tx.block_size * conf->tx.block_count - BLOCK_HEADER_SIZE;
1165 }
1166
1167 /**
1168 * Endpoint TX buffer allocation callback for nocopy sending.
1169 */
1170 static int get_tx_buffer(const struct device *instance, void *token, void **data,
1171 uint32_t *user_len, k_timeout_t wait)
1172 {
1173 struct backend_data *dev_data = instance->data;
1174 int r;
1175
1176 r = alloc_tx_buffer(dev_data, user_len, (uint8_t **)data, wait);
1177 if (r < 0) {
1178 return r;
1179 }
1180 return 0;
1181 }
1182
1183 /**
1184 * Endpoint TX buffer release callback for nocopy sending.
1185 */
1186 static int drop_tx_buffer(const struct device *instance, void *token, const void *data)
1187 {
1188 struct backend_data *dev_data = instance->data;
1189 int r;
1190
1191 r = release_tx_buffer(dev_data, data, -1);
1192 if (r < 0) {
1193 return r;
1194 }
1195
1196 return 0;
1197 }
1198
1199 /**
1200 * Endpoint nocopy sending.
1201 */
1202 static int send_nocopy(const struct device *instance, void *token, const void *data,
1203 size_t len)
1204 {
1205 struct backend_data *dev_data = instance->data;
1206 struct ept_data *ept = token;
1207 int r;
1208
1209 /* Actual size may be smaller than requested, so shrink if possible. */
1210 r = release_tx_buffer(dev_data, data, len);
1211 if (r < 0) {
1212 release_tx_buffer(dev_data, data, -1);
1213 return r;
1214 }
1215
1216 return send_block(dev_data, MSG_DATA, ept->addr, r, len);
1217 }
1218
1219 /**
1220 * RX buffer hold callback for nocopy receiving.
1221 */
1222 static int hold_rx_buffer(const struct device *instance, void *token, void *data)
1223 {
1224 const struct icbmsg_config *conf = instance->config;
1225 int rx_block_index;
1226 uint8_t *buffer = data;
1227
1228 /* Calculate block index and set associated bit. */
1229 rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL);
1230 __ASSERT_NO_MSG(rx_block_index >= 0);
1231 return sys_bitarray_set_bit(conf->rx_hold_bitmap, rx_block_index);
1232 }
1233
1234 /**
1235 * Release RX buffer that was previously held.
1236 */
1237 static int release_rx_buffer(const struct device *instance, void *token, void *data)
1238 {
1239 struct backend_data *dev_data = instance->data;
1240
1241 return send_release(dev_data, (uint8_t *)data, MSG_RELEASE_DATA, 0);
1242 }
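
/*
 * Example no-copy flow from the application's perspective (a sketch; it assumes the
 * generic ipc_service_*() no-copy API and an already bound endpoint "ep"):
 *
 *     // TX without copying: get a TX buffer, fill it and hand it back.
 *     void *buf;
 *     uint32_t size = 64;
 *     ipc_service_get_tx_buffer(&ep, &buf, &size, K_MSEC(100));
 *     memcpy(buf, payload, 64);
 *     ipc_service_send_nocopy(&ep, buf, 64);  // or ipc_service_drop_tx_buffer(&ep, buf)
 *
 *     // RX without copying: inside the received() callback, hold the buffer and
 *     // release it later, when processing has finished.
 *     ipc_service_hold_rx_buffer(&ep, (void *)data);
 *     // ...later...
 *     ipc_service_release_rx_buffer(&ep, (void *)data);
 */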
1243
1244 /**
1245 * Backend device initialization.
1246 */
1247 static int backend_init(const struct device *instance)
1248 {
1249 MAYBE_CONST struct icbmsg_config *conf = (struct icbmsg_config *)instance->config;
1250 struct backend_data *dev_data = instance->data;
1251 #ifdef CONFIG_MULTITHREADING
1252 static K_THREAD_STACK_DEFINE(ep_bound_work_q_stack, EP_BOUND_WORK_Q_STACK_SIZE);
1253 static bool is_work_q_started;
1254
1255 #if defined(CONFIG_ARCH_POSIX)
1256 native_emb_addr_remap((void **)&conf->tx.blocks_ptr);
1257 native_emb_addr_remap((void **)&conf->rx.blocks_ptr);
1258 #endif
1259
1260 if (!is_work_q_started) {
1261 k_work_queue_init(&ep_bound_work_q);
1262 k_work_queue_start(&ep_bound_work_q, ep_bound_work_q_stack,
1263 K_THREAD_STACK_SIZEOF(ep_bound_work_q_stack),
1264 EP_BOUND_WORK_Q_PRIORITY, NULL);
1265
1266 is_work_q_started = true;
1267 }
1268 #endif
1269
1270 dev_data->conf = conf;
1271 dev_data->is_initiator = (conf->rx.blocks_ptr < conf->tx.blocks_ptr);
1272 #ifdef CONFIG_MULTITHREADING
1273 k_mutex_init(&dev_data->mutex);
1274 k_work_init(&dev_data->ep_bound_work, ept_bound_process);
1275 k_sem_init(&dev_data->block_wait_sem, 0, 1);
1276 #endif
1277 memset(&dev_data->waiting_bound, 0xFF, sizeof(dev_data->waiting_bound));
1278 memset(&dev_data->ept_map, EPT_ADDR_INVALID, sizeof(dev_data->ept_map));
1279 return 0;
1280 }
1281
1282 /**
1283 * IPC service backend callbacks.
1284 */
1285 const static struct ipc_service_backend backend_ops = {
1286 .open_instance = open,
1287 .close_instance = NULL, /* not implemented */
1288 .send = send,
1289 .register_endpoint = register_ept,
1290 .deregister_endpoint = deregister_ept,
1291 .get_tx_buffer_size = get_tx_buffer_size,
1292 .get_tx_buffer = get_tx_buffer,
1293 .drop_tx_buffer = drop_tx_buffer,
1294 .send_nocopy = send_nocopy,
1295 .hold_rx_buffer = hold_rx_buffer,
1296 .release_rx_buffer = release_rx_buffer,
1297 };
1298
1299 /**
1300 * Required block alignment.
1301 */
1302 #define BLOCK_ALIGNMENT sizeof(uint32_t)
1303
1304 /**
1305 * Number of bytes per ICMsg message. It is used to calculate the size of the ICMsg area.
1306 */
1307 #define BYTES_PER_ICMSG_MESSAGE (ROUND_UP(sizeof(struct control_message), \
1308 sizeof(void *)) + PBUF_PACKET_LEN_SZ)
1309
1310 /**
1311 * Maximum ICMsg overhead. It is used to calculate size of ICMsg area.
1312 */
1313 #define ICMSG_BUFFER_OVERHEAD(i) \
1314 (PBUF_HEADER_OVERHEAD(GET_CACHE_ALIGNMENT(i)) + 2 * BYTES_PER_ICMSG_MESSAGE)
1315
1316 /**
1317 * Returns required data cache alignment for instance "i".
1318 */
1319 #define GET_CACHE_ALIGNMENT(i) \
1320 MAX(BLOCK_ALIGNMENT, DT_INST_PROP_OR(i, dcache_alignment, 0))
1321
1322 /**
1323 * Calculates the minimum size required for the ICMsg region for a specific number of
1324 * local and remote blocks. The minimum size ensures that the ICMsg queue will never
1325 * overflow, because it can hold a data message for each local block and a release
1326 * message for each remote block.
1327 */
1328 #define GET_ICMSG_MIN_SIZE(i, local_blocks, remote_blocks) ROUND_UP( \
1329 (ICMSG_BUFFER_OVERHEAD(i) + BYTES_PER_ICMSG_MESSAGE * \
1330 (local_blocks + remote_blocks)), GET_CACHE_ALIGNMENT(i))
1331
1332 /**
1333 * Calculate the aligned block size by evenly dividing the remaining space after
1334 * removing the space for ICMsg.
1335 */
1336 #define GET_BLOCK_SIZE(i, total_size, local_blocks, remote_blocks) ROUND_DOWN( \
1337 ((total_size) - GET_ICMSG_MIN_SIZE(i, (local_blocks), (remote_blocks))) / \
1338 (local_blocks), BLOCK_ALIGNMENT)
1339
1340 /**
1341 * Calculate the offset where the blocks area starts, which is just after the ICMsg area.
1342 */
1343 #define GET_BLOCKS_OFFSET(i, total_size, local_blocks, remote_blocks) \
1344 ((total_size) - GET_BLOCK_SIZE(i, (total_size), (local_blocks), \
1345 (remote_blocks)) * (local_blocks))
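
/*
 * Worked example (illustrative numbers): for a 4096-byte region with 8 local and
 * 8 remote blocks, the ICMsg area must be able to hold 16 messages plus the pbuf
 * overhead, rounded up to the cache alignment. The remaining space is divided into
 * 8 blocks rounded down to BLOCK_ALIGNMENT:
 *
 *     icmsg_size    = GET_ICMSG_MIN_SIZE(i, 8, 8);
 *     block_size    = ROUND_DOWN((4096 - icmsg_size) / 8, BLOCK_ALIGNMENT);
 *     blocks_offset = 4096 - 8 * block_size;
 */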
1346
1347 /**
1348 * Return shared memory start address aligned to block alignment and cache line.
1349 */
1350 #define GET_MEM_ADDR_INST(i, direction) \
1351 ROUND_UP(DT_REG_ADDR(DT_INST_PHANDLE(i, direction##_region)), \
1352 GET_CACHE_ALIGNMENT(i))
1353
1354 /**
1355 * Return shared memory end address aligned to block alignment and cache line.
1356 */
1357 #define GET_MEM_END_INST(i, direction) \
1358 ROUND_DOWN(DT_REG_ADDR(DT_INST_PHANDLE(i, direction##_region)) + \
1359 DT_REG_SIZE(DT_INST_PHANDLE(i, direction##_region)), \
1360 GET_CACHE_ALIGNMENT(i))
1361
1362 /**
1363 * Return shared memory size aligned to block alignment and cache line.
1364 */
1365 #define GET_MEM_SIZE_INST(i, direction) \
1366 (GET_MEM_END_INST(i, direction) - GET_MEM_ADDR_INST(i, direction))
1367
1368 /**
1369 * Returns the size of the ICMsg area for a specific instance and direction.
1370 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
1371 * or "rx, tx".
1372 */
1373 #define GET_ICMSG_SIZE_INST(i, loc, rem) \
1374 GET_BLOCKS_OFFSET( \
1375 i, \
1376 GET_MEM_SIZE_INST(i, loc), \
1377 DT_INST_PROP(i, loc##_blocks), \
1378 DT_INST_PROP(i, rem##_blocks))
1379
1380 /**
1381 * Returns the address where the blocks area starts for a specific instance and direction.
1382 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
1383 * or "rx, tx".
1384 */
1385 #define GET_BLOCKS_ADDR_INST(i, loc, rem) \
1386 GET_MEM_ADDR_INST(i, loc) + \
1387 GET_BLOCKS_OFFSET( \
1388 i, \
1389 GET_MEM_SIZE_INST(i, loc), \
1390 DT_INST_PROP(i, loc##_blocks), \
1391 DT_INST_PROP(i, rem##_blocks))
1392
1393 /**
1394 * Returns the block size for a specific instance and direction.
1395 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
1396 * or "rx, tx".
1397 */
1398 #define GET_BLOCK_SIZE_INST(i, loc, rem) \
1399 GET_BLOCK_SIZE( \
1400 i, \
1401 GET_MEM_SIZE_INST(i, loc), \
1402 DT_INST_PROP(i, loc##_blocks), \
1403 DT_INST_PROP(i, rem##_blocks))
1404
1405 #define DEFINE_BACKEND_DEVICE(i) \
1406 SYS_BITARRAY_DEFINE_STATIC(tx_usage_bitmap_##i, DT_INST_PROP(i, tx_blocks)); \
1407 SYS_BITARRAY_DEFINE_STATIC(rx_hold_bitmap_##i, DT_INST_PROP(i, rx_blocks)); \
1408 PBUF_DEFINE(tx_icbmsg_pb_##i, \
1409 GET_MEM_ADDR_INST(i, tx), \
1410 GET_ICMSG_SIZE_INST(i, tx, rx), \
1411 GET_CACHE_ALIGNMENT(i)); \
1412 PBUF_DEFINE(rx_icbmsg_pb_##i, \
1413 GET_MEM_ADDR_INST(i, rx), \
1414 GET_ICMSG_SIZE_INST(i, rx, tx), \
1415 GET_CACHE_ALIGNMENT(i)); \
1416 static struct backend_data backend_data_##i = { \
1417 .control_data = { \
1418 .tx_pb = &tx_icbmsg_pb_##i, \
1419 .rx_pb = &rx_icbmsg_pb_##i, \
1420 } \
1421 }; \
1422 static MAYBE_CONST struct icbmsg_config backend_config_##i = \
1423 { \
1424 .control_config = { \
1425 .mbox_tx = MBOX_DT_SPEC_INST_GET(i, tx), \
1426 .mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
1427 }, \
1428 .tx = { \
1429 .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, tx, rx), \
1430 .block_count = DT_INST_PROP(i, tx_blocks), \
1431 .block_size = GET_BLOCK_SIZE_INST(i, tx, rx), \
1432 }, \
1433 .rx = { \
1434 .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, rx, tx), \
1435 .block_count = DT_INST_PROP(i, rx_blocks), \
1436 .block_size = GET_BLOCK_SIZE_INST(i, rx, tx), \
1437 }, \
1438 .tx_usage_bitmap = &tx_usage_bitmap_##i, \
1439 .rx_hold_bitmap = &rx_hold_bitmap_##i, \
1440 }; \
1441 BUILD_ASSERT(IS_POWER_OF_TWO(GET_CACHE_ALIGNMENT(i)), \
1442 "This module supports only power of two cache alignment"); \
1443 BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, tx, rx) >= BLOCK_ALIGNMENT) && \
1444 (GET_BLOCK_SIZE_INST(i, tx, rx) < \
1445 GET_MEM_SIZE_INST(i, tx)), \
1446 "TX region is too small for provided number of blocks"); \
1447 BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, rx, tx) >= BLOCK_ALIGNMENT) && \
1448 (GET_BLOCK_SIZE_INST(i, rx, tx) < \
1449 GET_MEM_SIZE_INST(i, rx)), \
1450 "RX region is too small for provided number of blocks"); \
1451 BUILD_ASSERT(DT_INST_PROP(i, rx_blocks) <= 256, "Too many RX blocks"); \
1452 BUILD_ASSERT(DT_INST_PROP(i, tx_blocks) <= 256, "Too many TX blocks"); \
1453 DEVICE_DT_INST_DEFINE(i, \
1454 &backend_init, \
1455 NULL, \
1456 &backend_data_##i, \
1457 &backend_config_##i, \
1458 POST_KERNEL, \
1459 CONFIG_IPC_SERVICE_REG_BACKEND_PRIORITY, \
1460 &backend_ops);
1461
1462 DT_INST_FOREACH_STATUS_OKAY(DEFINE_BACKEND_DEVICE)
1463