Lines Matching +full:conf +full:- +full:rx

 * SPDX-License-Identifier: Apache-2.0
 * Shared memory organization
 * --------------------------
 * Single channel (RX or TX) of the shared memory is divided into two areas: ICMsg area
 * followed by Blocks area. ICMsg is used to send and receive short 3-byte messages.
 *  +------------+-------------+
 *  | ICMsg area | Blocks area |
 *  +------------+-------------+
 *  +-----------+-----------+-----------+-----------+- -+-----------+
 *  |  Block 0  |  Block 1  |  Block 2  |  Block 3  |...| Block N-1 |
 *  +-----------+-----------+-----------+-----------+- -+-----------+
 *  +------+--------------------------------+---------+
 *  | size | data_buffer[size] ...          | padding |
 *  +------+--------------------------------+---------+
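
The per-block layout above matches the flexible-array structure implied by the accessors used later in this listing (block->header.size, block->data); a minimal sketch, with the volatile qualifier assumed:

struct block_header {
        volatile size_t size;   /* Size of the data stored in data[]. */
};

struct block_content {
        struct block_header header;
        uint8_t data[];         /* Buffer data, padded up to the block boundary. */
};
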
 * Control messages
 * ----------------
 *
 * ICMsg is used to send and receive small 3-byte control messages.
 *  - Send data
 *
 *  - Release data
 *
 *  - Bound endpoint
 *    The buffer contains the null-terminated endpoint name.
 *
 *  - Release bound endpoint
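
Each control message is 3 bytes; a sketch consistent with the fields read in control_received() below (msg_type, ept_addr, block_index):

struct control_message {
        uint8_t msg_type;       /* Message type, e.g. MSG_DATA or MSG_BOUND. */
        uint8_t ept_addr;       /* Endpoint address, zero when it does not matter. */
        uint8_t block_index;    /* Index of the first block of the related buffer. */
};
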
 * Bounding endpoints
 * ------------------
        struct channel_config rx;               /* RX channel config. */
        const struct icbmsg_config *conf;       /* Backend instance config. */
 * Calculate a pointer to a block from its index and the channel configuration (RX or TX).
        return (struct block_content *)(ch_conf->blocks_ptr +
                                        block_index * ch_conf->block_size);
 * Calculate a pointer to a data buffer from a block index and the channel configuration (RX or TX).
        if (block_index >= ch_conf->block_count) {
        allocable_size = ch_conf->block_count * ch_conf->block_size;
        end_ptr = ch_conf->blocks_ptr + allocable_size;
        buffer_size = block->header.size;
        if ((buffer_size > allocable_size - BLOCK_HEADER_SIZE) ||
            (&block->data[buffer_size] > end_ptr)) {
        sys_cache_data_invd_range(block->data, buffer_size);
        return block->data;
 * @retval -EINVAL The buffer is not valid.
        block_index = (buffer - ch_conf->blocks_ptr) / ch_conf->block_size;
        return -EINVAL;
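
This is the inverse of block_from_index(). A worked example with assumed values: if blocks_ptr = 0x20000000, block_size = 64 and BLOCK_HEADER_SIZE = 4, the data pointer of block 2 is 0x20000000 + 2 * 64 + 4 = 0x20000084, and (0x84) / 64 = 2 recovers the index, because the header offset is always smaller than the block size.
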
 * @retval -ENOMEM If the requested size is bigger than the entire allocable space, or
 *                 there is no free space and the timeout was K_NO_WAIT.
 * @retval -EAGAIN If timeout occurred.
        const struct icbmsg_config *conf = dev_data->conf;
        size_t num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
        r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, &tx_block_index);
        if (r == -ENOSPC && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
        r = k_sem_take(&dev_data->block_wait_sem, timeout);
        k_sem_give(&dev_data->block_wait_sem);
        r = sys_bitarray_alloc(conf->tx_usage_bitmap, num_blocks, &tx_block_index);
        if (r != -ENOSPC && r != -EAGAIN) {
                /* Only -EINVAL is allowed in this place. Any other code
                __ASSERT_NO_MSG(r == -EINVAL);
        if (r == -ENOSPC || r == -EINVAL) {
                /* The IPC service requires -ENOMEM in case of no memory. */
                r = -ENOMEM;
        for (next_bit = tx_block_index + 1; next_bit < conf->tx.block_count;
                r = sys_bitarray_test_and_set_bit(conf->tx_usage_bitmap, next_bit,
        num_blocks = next_bit - tx_block_index;
        *size = conf->tx.block_size * num_blocks - BLOCK_HEADER_SIZE;
        block = block_from_index(&conf->tx, tx_block_index);
        block->header.size = *size;
        *buffer = block->data;
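
A worked sizing example with assumed values: with tx.block_size = 64, BLOCK_HEADER_SIZE = 4 and a request for 100 bytes, total_size = 104 (payload plus header), num_blocks = DIV_ROUND_UP(104, 64) = 2, and the caller gets back *size = 2 * 64 - 4 = 124 usable bytes, i.e. the request is rounded up to the next block boundary.
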
 * @retval -EINVAL If an invalid buffer was provided or the size is greater than the already allocated size.
        const struct icbmsg_config *conf = dev_data->conf;
        num_blocks = DIV_ROUND_UP(total_size, conf->tx.block_size);
        new_num_blocks = DIV_ROUND_UP(new_total_size, conf->tx.block_size);
        return -EINVAL;
        block = block_from_index(&conf->tx, tx_block_index);
        block->header.size = new_size;
        num_blocks = num_blocks - new_num_blocks;
        r = sys_bitarray_free(conf->tx_usage_bitmap, num_blocks,
        k_sem_give(&dev_data->block_wait_sem);
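
Continuing the sizing example above: if only 50 of the 124 bytes are actually sent, new_total_size = 54, so new_num_blocks = 1 and one trailing block is returned to tx_usage_bitmap; the k_sem_give() then wakes any allocator blocked in alloc_tx_buffer().
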
 * @retval -EINVAL If an invalid buffer was provided or the size is greater than the already allocated size.
        const struct icbmsg_config *conf = dev_data->conf;
        tx_block_index = buffer_to_index_validate(&conf->tx, buffer, &size);
        const struct icbmsg_config *conf = dev_data->conf;
        k_mutex_lock(&dev_data->mutex, K_FOREVER);
        r = icmsg_send(&conf->control_config, &dev_data->control_data, &message,
        k_mutex_unlock(&dev_data->mutex);
        const struct icbmsg_config *conf = dev_data->conf;
        rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL);
        block = block_from_index(&dev_data->conf->tx, tx_block_index);
        block->header.size = size;
        release_tx_blocks(dev_data, tx_block_index, size, -1);
 * @return Found endpoint index or -ENOENT if not found.
        const struct channel_config *rx_conf = &dev_data->conf->rx;
        const char *buffer_end = (const char *)rx_conf->blocks_ptr +
                                 rx_conf->block_count * rx_conf->block_size;
        name_size = strnlen(name, buffer_end - name - 1) + 1;
        ept = &dev_data->ept[i];
        if (atomic_get(&ept->state) == EPT_CONFIGURED &&
            strncmp(ept->cfg->name, name, name_size) == 0) {
        return -ENOENT;
 * @return A negative error code, or a non-negative search result.
        const struct icbmsg_config *conf = dev_data->conf;
        block = block_from_index(&conf->rx, rx_block_index);
        buffer = block->data;
        ept = &dev_data->ept[ept_index];
        ept->addr = ept_addr;
        dev_data->ept_map[ept->addr] = ept_index;
        valid_state = atomic_cas(&ept->state, EPT_CONFIGURED, EPT_READY);
        return -EINVAL;
        if (ept->cfg->cb.bound != NULL) {
                ept->cfg->cb.bound(ept->cfg->priv);
 * @return A non-negative value on success, or a negative error code.
        msg_len = strlen(ept->cfg->name) + 1;
        strcpy(buffer, ept->cfg->name);
        r = send_block(dev_data, MSG_BOUND, ept->addr, r, msg_len);
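
For illustration (address and index assumed): binding an endpoint named "ep0" with address 1, whose name was copied into a buffer starting at block 5, sends the 3-byte control message | MSG_BOUND | 1 | 5 |, and the buffer itself holds the four bytes "ep0\0".
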
        k_work_submit_to_queue(&ep_bound_work_q, &dev_data->ep_bound_work);
        if (!(atomic_get(&dev_data->flags) & CONTROL_BOUNDED)) {
        if (dev_data->is_initiator) {
                ept = &dev_data->ept[i];
                matching_state = atomic_cas(&ept->state, EPT_CONFIGURED,
                atomic_set(&ept->state, EPT_UNCONFIGURED);
        k_mutex_lock(&dev_data->mutex, K_FOREVER);
        if (dev_data->waiting_bound[i] != WAITING_BOUND_MSG_EMPTY) {
                k_mutex_unlock(&dev_data->mutex);
                dev_data->waiting_bound[i], i);
                k_mutex_lock(&dev_data->mutex, K_FOREVER);
                dev_data->waiting_bound[i] = WAITING_BOUND_MSG_EMPTY;
        k_mutex_unlock(&dev_data->mutex);
        ept = &dev_data->ept[i];
        matching_state = atomic_cas(&ept->rebound_state, EPT_REBOUNDING,
        if (ept->cfg->cb.bound != NULL) {
                ept->cfg->cb.bound(ept->cfg->priv);
        if (ept_addr >= NUM_EPT || dev_data->ept_map[ept_addr] >= NUM_EPT) {
        ept = &dev_data->ept[dev_data->ept_map[ept_addr]];
        state = atomic_get(&ept->state);
        if (atomic_get(&ept->rebound_state) != EPT_NORMAL) {
        /* Endpoint bound callback was not called yet - call it. */
        atomic_set(&ept->state, EPT_READY);
        if (ept->cfg->cb.bound != NULL) {
                ept->cfg->cb.bound(ept->cfg->priv);
        LOG_ERR("Invalid state %d of receiving endpoint %d", state, ept->addr);
        const struct icbmsg_config *conf = dev_data->conf;
        buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true);
        return -EINVAL;
        sys_bitarray_clear_bit(conf->rx_hold_bitmap, rx_block_index);
        ept->cfg->cb.received(buffer, size, ept->cfg->priv);
        sys_bitarray_test_bit(conf->rx_hold_bitmap, rx_block_index, &bit_val);
        const struct icbmsg_config *conf = dev_data->conf;
        buffer = buffer_from_index_validate(&conf->tx, tx_block_index, &size, false);
        return -EINVAL;
        r = release_tx_blocks(dev_data, tx_block_index, size, -1);
        const struct icbmsg_config *conf = dev_data->conf;
        buffer = buffer_from_index_validate(&conf->rx, rx_block_index, &size, true);
        return -EINVAL;
        k_mutex_lock(&dev_data->mutex, K_FOREVER);
        dev_data->waiting_bound[ept_addr] = rx_block_index;
        k_mutex_unlock(&dev_data->mutex);
        struct backend_data *dev_data = instance->data;
        r = -EINVAL;
        ept_addr = message->ept_addr;
        r = -EINVAL;
        switch (message->msg_type) {
                r = received_release_data(dev_data, message->block_index);
                r = received_release_data(dev_data, message->block_index);
                r = -EINVAL;
                r = received_bound(dev_data, message->block_index, ept_addr);
                r = received_data(dev_data, message->block_index, ept_addr);
        struct backend_data *dev_data = instance->data;
        atomic_or(&dev_data->flags, CONTROL_BOUNDED);
        const struct icbmsg_config *conf = instance->config;
        struct backend_data *dev_data = instance->data;
                dev_data->is_initiator ? 1 : 0);
        LOG_DBG("    TX %d blocks of %d bytes at 0x%08X, max allocable %d bytes",
                (uint32_t)conf->tx.block_count,
                (uint32_t)conf->tx.block_size,
                (uint32_t)conf->tx.blocks_ptr,
                (uint32_t)(conf->tx.block_size * conf->tx.block_count -
                           BLOCK_HEADER_SIZE));
        LOG_DBG("    RX %d blocks of %d bytes at 0x%08X, max allocable %d bytes",
                (uint32_t)conf->rx.block_count,
                (uint32_t)conf->rx.block_size,
                (uint32_t)conf->rx.blocks_ptr,
                (uint32_t)(conf->rx.block_size * conf->rx.block_count -
                           BLOCK_HEADER_SIZE));
        return icmsg_open(&conf->control_config, &dev_data->control_data, &cb,
        struct backend_data *dev_data = instance->data;
        r = send_block(dev_data, MSG_DATA, ept->addr, r, len);
        struct backend_data *dev_data = instance->data;
        ept = &dev_data->ept[ept_index];
        if (ept->cfg == cfg) {
                matching_state = atomic_cas(&ept->rebound_state, EPT_DEREGISTERED,
                return -EINVAL;
        ept_index = atomic_inc(&dev_data->flags) & FLAG_EPT_COUNT_MASK;
        return -ENOMEM;
        ept = &dev_data->ept[ept_index];
        ept->cfg = cfg;
        if (dev_data->is_initiator) {
                ept->addr = ept_index;
                dev_data->ept_map[ept->addr] = ept->addr;
        atomic_set(&ept->state, EPT_CONFIGURED);
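
At the API level, an endpoint lands in this table via the generic Zephyr ipc_service calls; a sketch (device reference, endpoint name and callbacks are illustrative):

#include <errno.h>
#include <zephyr/kernel.h>
#include <zephyr/ipc/ipc_service.h>

static void bound_cb(void *priv) { /* Endpoint is bound and ready to send. */ }
static void recv_cb(const void *data, size_t len, void *priv) { /* Handle data. */ }

static struct ipc_ept ept;
static struct ipc_ept_cfg ept_cfg = {
        .name = "ep0",
        .cb = { .bound = bound_cb, .received = recv_cb },
};

int setup(const struct device *ipc_instance)
{
        int r = ipc_service_open_instance(ipc_instance);

        if (r < 0 && r != -EALREADY) {
                return r;
        }
        return ipc_service_register_endpoint(ipc_instance, &ept, &ept_cfg);
}
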
        matching_state = atomic_cas(&ept->rebound_state, EPT_NORMAL, EPT_DEREGISTERED);
        return -EINVAL;
        const struct icbmsg_config *conf = instance->config;
        return conf->tx.block_size * conf->tx.block_count - BLOCK_HEADER_SIZE;
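
For example, with assumed values of 16 TX blocks of 64 bytes and a 4-byte BLOCK_HEADER_SIZE, the largest single buffer is 16 * 64 - 4 = 1020 bytes: even a buffer spanning every block carries exactly one header, in its first block.
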
        struct backend_data *dev_data = instance->data;
        struct backend_data *dev_data = instance->data;
        r = release_tx_buffer(dev_data, data, -1);
        struct backend_data *dev_data = instance->data;
        release_tx_buffer(dev_data, data, -1);
        return send_block(dev_data, MSG_DATA, ept->addr, r, len);
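
The caller-side nocopy TX flow that exercises get_tx_buffer()/send_nocopy() looks roughly like this (a sketch; sizes and timeout are illustrative):

#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/ipc/ipc_service.h>

int send_nocopy_example(struct ipc_ept *ept)
{
        uint32_t size = 64;     /* Requested payload capacity. */
        void *buf;
        int r;

        /* Allocates blocks; on success 'size' is updated to the real capacity. */
        r = ipc_service_get_tx_buffer(ept, &buf, &size, K_MSEC(100));
        if (r < 0) {
                return r;       /* -ENOMEM if it can never fit, -EAGAIN on timeout. */
        }

        memcpy(buf, "hello", 6);

        /* Ownership of the buffer passes to the backend. */
        return ipc_service_send_nocopy(ept, buf, 6);
}
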
 * Hold the RX buffer for nocopy receiving.
        const struct icbmsg_config *conf = instance->config;
        rx_block_index = buffer_to_index_validate(&conf->rx, buffer, NULL);
        return sys_bitarray_set_bit(conf->rx_hold_bitmap, rx_block_index);
 * Release an RX buffer that was previously held.
        struct backend_data *dev_data = instance->data;
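
On the receive side, a callback can keep the buffer alive past its own return; a sketch, assuming 'priv' was configured to point at the endpoint:

static void recv_cb(const void *data, size_t len, void *priv)
{
        struct ipc_ept *ept = priv;

        /* Sets the rx_hold_bitmap bit so received_data() skips the automatic release. */
        ipc_service_hold_rx_buffer(ept, (void *)data);

        /* ... process 'data' asynchronously, then hand it back; this ends up
         * in release_rx_buffer() and sends a release control message.
         */
        ipc_service_release_rx_buffer(ept, (void *)data);
}
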
        MAYBE_CONST struct icbmsg_config *conf = (struct icbmsg_config *)instance->config;
        struct backend_data *dev_data = instance->data;
        native_emb_addr_remap((void **)&conf->tx.blocks_ptr);
        native_emb_addr_remap((void **)&conf->rx.blocks_ptr);
        dev_data->conf = conf;
        dev_data->is_initiator = (conf->rx.blocks_ptr < conf->tx.blocks_ptr);
        k_mutex_init(&dev_data->mutex);
        k_work_init(&dev_data->ep_bound_work, ept_bound_process);
        k_sem_init(&dev_data->block_wait_sem, 0, 1);
        memset(&dev_data->waiting_bound, 0xFF, sizeof(dev_data->waiting_bound));
        memset(&dev_data->ept_map, EPT_ADDR_INVALID, sizeof(dev_data->ept_map));
        ((total_size) - GET_ICMSG_MIN_SIZE(i, (local_blocks), (remote_blocks))) / \
        ((total_size) - GET_BLOCK_SIZE(i, (total_size), (local_blocks), \
        (GET_MEM_END_INST(i, direction) - GET_MEM_ADDR_INST(i, direction))
 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
 * or "rx, tx".
 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
 * or "rx, tx".
 * The 'loc' and 'rem' parameters tell the direction. They can be either "tx, rx"
 * or "rx, tx".
                GET_ICMSG_SIZE_INST(i, tx, rx), \
                GET_MEM_ADDR_INST(i, rx), \
                GET_ICMSG_SIZE_INST(i, rx, tx), \
                .mbox_rx = MBOX_DT_SPEC_INST_GET(i, rx), \
                .tx = { \
                        .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, tx, rx), \
                        .block_size = GET_BLOCK_SIZE_INST(i, tx, rx), \
                }, \
                .rx = { \
                        .blocks_ptr = (uint8_t *)GET_BLOCKS_ADDR_INST(i, rx, tx), \
                        .block_size = GET_BLOCK_SIZE_INST(i, rx, tx), \
        BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, tx, rx) >= BLOCK_ALIGNMENT) && \
                     (GET_BLOCK_SIZE_INST(i, tx, rx) < \
                      GET_MEM_SIZE_INST(i, tx)), \
                     "TX region is too small for provided number of blocks"); \
        BUILD_ASSERT((GET_BLOCK_SIZE_INST(i, rx, tx) >= BLOCK_ALIGNMENT) && \
                     (GET_BLOCK_SIZE_INST(i, rx, tx) < \
                      GET_MEM_SIZE_INST(i, rx)), \
                     "RX region is too small for provided number of blocks"); \
        BUILD_ASSERT(DT_INST_PROP(i, rx_blocks) <= 256, "Too many RX blocks"); \
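
The DT_INST_* macros above consume a devicetree node along these lines (a sketch following the zephyr,ipc-icbmsg binding; labels and block counts are illustrative):

ipc0: ipc {
        compatible = "zephyr,ipc-icbmsg";
        tx-region = <&tx_shm>;
        rx-region = <&rx_shm>;
        tx-blocks = <16>;
        rx-blocks = <24>;
        mboxes = <&mbox 0>, <&mbox 1>;
        mbox-names = "tx", "rx";
        status = "okay";
};

Per the BUILD_ASSERT lines, each region must be large enough for its block count, and rx-blocks may not exceed 256.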