Lines Matching +full:concat +full:- +full:buf +full:- +full:size

4  * SPDX-License-Identifier: Apache-2.0
9 * @brief Real-Time IO device API for moving bytes with low effort
126 * -ECANCELED as the result.
311 const uint8_t *buf; /**< Buffer to write from */ member
317 uint8_t *buf; /**< Buffer to read into */ member
323 uint8_t buf[7]; /**< Tiny buffer */ member
356 /* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
444 * @brief Get the mempool block size of the RTIO context
447 * @return The size of each block in the context's mempool
456 if (r == NULL || r->block_pool == NULL) { in rtio_mempool_block_size()
459 return BIT(r->block_pool->info.blk_sz_shift); in rtio_mempool_block_size()
474 struct sys_mem_blocks *mem_pool = r->block_pool; in __rtio_compute_mempool_block_index()
477 uintptr_t buff = (uintptr_t)mem_pool->buffer; in __rtio_compute_mempool_block_index()
478 uint32_t buff_size = mem_pool->info.num_blocks * block_size; in __rtio_compute_mempool_block_index()
483 return (addr - buff) / block_size; in __rtio_compute_mempool_block_index()
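
The two helpers above are the mempool accessors: rtio_mempool_block_size() returns BIT(blk_sz_shift), i.e. the pool's block size in bytes (or 0 when the context has no block pool), and __rtio_compute_mempool_block_index() maps an address back to (addr - pool_start) / block_size. A minimal usage sketch, assuming a mempool-backed context named my_pool_rtio (a placeholder, not something defined in this header):

    #include <zephyr/rtio/rtio.h>
    #include <zephyr/sys/printk.h>

    extern struct rtio my_pool_rtio; /* hypothetical RTIO_DEFINE_WITH_MEMPOOL() context */

    void block_size_sketch(void)
    {
        /* BIT(blk_sz_shift): the pool's block size in bytes, 0 if no pool. */
        size_t blk_size = rtio_mempool_block_size(&my_pool_rtio);

        printk("mempool block size: %zu bytes\n", blk_size);
    }
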
566 sqe->op = RTIO_OP_NOP; in rtio_sqe_prep_nop()
567 sqe->iodev = iodev; in rtio_sqe_prep_nop()
568 sqe->userdata = userdata; in rtio_sqe_prep_nop()
577 uint8_t *buf, in rtio_sqe_prep_read() argument
582 sqe->op = RTIO_OP_RX; in rtio_sqe_prep_read()
583 sqe->prio = prio; in rtio_sqe_prep_read()
584 sqe->iodev = iodev; in rtio_sqe_prep_read()
585 sqe->rx.buf_len = len; in rtio_sqe_prep_read()
586 sqe->rx.buf = buf; in rtio_sqe_prep_read()
587 sqe->userdata = userdata; in rtio_sqe_prep_read()
600 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER; in rtio_sqe_prep_read_with_pool()
608 sqe->flags |= RTIO_SQE_MULTISHOT; in rtio_sqe_prep_read_multishot()
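
A minimal sketch of queueing a read, assuming a context my_rtio and an iodev binding my_iodev defined elsewhere (both names are placeholders for whatever the application provides):

    #include <errno.h>
    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_rtio;              /* hypothetical context */
    extern const struct rtio_iodev my_iodev; /* hypothetical device binding */

    static uint8_t rx_data[16];

    int queue_read_sketch(void *userdata)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);

        if (sqe == NULL) {
            return -ENOMEM;
        }

        /* Read into a caller-owned buffer; it must stay valid until the
         * completion is consumed.
         */
        rtio_sqe_prep_read(sqe, &my_iodev, RTIO_PRIO_NORM, rx_data, sizeof(rx_data), userdata);

        /* With a mempool-backed context the buffer can instead be allocated
         * by the executor:
         * rtio_sqe_prep_read_with_pool(sqe, &my_iodev, RTIO_PRIO_NORM, userdata);
         */

        return rtio_submit(&my_rtio, 0);
    }
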
617 const uint8_t *buf, in rtio_sqe_prep_write() argument
622 sqe->op = RTIO_OP_TX; in rtio_sqe_prep_write()
623 sqe->prio = prio; in rtio_sqe_prep_write()
624 sqe->iodev = iodev; in rtio_sqe_prep_write()
625 sqe->tx.buf_len = len; in rtio_sqe_prep_write()
626 sqe->tx.buf = buf; in rtio_sqe_prep_write()
627 sqe->userdata = userdata; in rtio_sqe_prep_write()
635 * within the specified size of a rtio_sqe.
647 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf)); in rtio_sqe_prep_tiny_write()
650 sqe->op = RTIO_OP_TINY_TX; in rtio_sqe_prep_tiny_write()
651 sqe->prio = prio; in rtio_sqe_prep_tiny_write()
652 sqe->iodev = iodev; in rtio_sqe_prep_tiny_write()
653 sqe->tiny_tx.buf_len = tiny_write_len; in rtio_sqe_prep_tiny_write()
654 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len); in rtio_sqe_prep_tiny_write()
655 sqe->userdata = userdata; in rtio_sqe_prep_tiny_write()
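
The two write variants differ in buffer ownership: rtio_sqe_prep_write() keeps a pointer to the caller's data, while rtio_sqe_prep_tiny_write() copies up to sizeof(sqe->tiny_tx.buf) bytes into the SQE itself, so short stack data is safe. A sketch chaining the two, with the same placeholder my_rtio/my_iodev names:

    #include <errno.h>
    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_rtio;              /* hypothetical context */
    extern const struct rtio_iodev my_iodev; /* hypothetical device binding */

    static const uint8_t payload[32] = {0xAA, 0xBB};

    int queue_writes_sketch(void)
    {
        const uint8_t reg_addr[2] = {0x10, 0x01};
        struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);

        if (sqe == NULL) {
            return -ENOMEM;
        }

        /* Small header copied into the SQE; reg_addr may live on the stack. */
        rtio_sqe_prep_tiny_write(sqe, &my_iodev, RTIO_PRIO_NORM, reg_addr, sizeof(reg_addr), NULL);
        sqe->flags |= RTIO_SQE_CHAINED; /* run the next SQE only after this one succeeds */

        sqe = rtio_sqe_acquire(&my_rtio);
        if (sqe == NULL) {
            rtio_sqe_drop_all(&my_rtio);
            return -ENOMEM;
        }

        /* Larger payload referenced by pointer; must remain valid until completion. */
        rtio_sqe_prep_write(sqe, &my_iodev, RTIO_PRIO_NORM, payload, sizeof(payload), NULL);

        return rtio_submit(&my_rtio, 0);
    }
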
672 sqe->op = RTIO_OP_CALLBACK; in rtio_sqe_prep_callback()
673 sqe->prio = 0; in rtio_sqe_prep_callback()
674 sqe->iodev = NULL; in rtio_sqe_prep_callback()
675 sqe->callback.callback = callback; in rtio_sqe_prep_callback()
676 sqe->callback.arg0 = arg0; in rtio_sqe_prep_callback()
677 sqe->userdata = userdata; in rtio_sqe_prep_callback()
696 sqe->flags |= RTIO_SQE_NO_RESPONSE; in rtio_sqe_prep_callback_no_cqe()
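
Callback SQEs carry no iodev; the executor invokes the function directly. A sketch, assuming the placeholder my_rtio context:

    #include <zephyr/rtio/rtio.h>
    #include <zephyr/sys/printk.h>

    extern struct rtio my_rtio; /* hypothetical context */

    static void on_done(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
    {
        ARG_UNUSED(r);
        ARG_UNUSED(sqe);

        printk("callback ran, arg0=%p\n", arg0);
    }

    void queue_callback_sketch(void *arg0)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);

        if (sqe != NULL) {
            /* Typically chained after an I/O SQE; the _no_cqe variant above
             * additionally sets RTIO_SQE_NO_RESPONSE so no completion entry
             * is produced for it.
             */
            rtio_sqe_prep_callback(sqe, on_done, arg0, NULL);
            rtio_submit(&my_rtio, 0);
        }
    }
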
711 sqe->op = RTIO_OP_TXRX; in rtio_sqe_prep_transceive()
712 sqe->prio = prio; in rtio_sqe_prep_transceive()
713 sqe->iodev = iodev; in rtio_sqe_prep_transceive()
714 sqe->txrx.buf_len = buf_len; in rtio_sqe_prep_transceive()
715 sqe->txrx.tx_buf = tx_buf; in rtio_sqe_prep_transceive()
716 sqe->txrx.rx_buf = rx_buf; in rtio_sqe_prep_transceive()
717 sqe->userdata = userdata; in rtio_sqe_prep_transceive()
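
Transceive submissions carry one length for both buffers, which suits full-duplex transfers such as SPI. A sketch with the same placeholder names:

    #include <errno.h>
    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_rtio;              /* hypothetical context */
    extern const struct rtio_iodev my_iodev; /* hypothetical full-duplex iodev */

    static const uint8_t tx_data[8] = {0x80};
    static uint8_t rx_data[8];

    int transceive_sketch(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);

        if (sqe == NULL) {
            return -ENOMEM;
        }

        /* One buf_len covers both directions, as in the prep above. */
        rtio_sqe_prep_transceive(sqe, &my_iodev, RTIO_PRIO_NORM, tx_data, rx_data,
                                 sizeof(rx_data), NULL);

        /* Submit and wait for the single completion. */
        return rtio_submit(&my_rtio, 1);
    }
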
722 struct mpsc_node *node = mpsc_pop(&pool->free_q); in rtio_sqe_pool_alloc()
730 pool->pool_free--; in rtio_sqe_pool_alloc()
737 mpsc_push(&pool->free_q, &iodev_sqe->q); in rtio_sqe_pool_free()
739 pool->pool_free++; in rtio_sqe_pool_free()
744 struct mpsc_node *node = mpsc_pop(&pool->free_q); in rtio_cqe_pool_alloc()
754 pool->pool_free--; in rtio_cqe_pool_alloc()
761 mpsc_push(&pool->free_q, &cqe->q); in rtio_cqe_pool_free()
763 pool->pool_free++; in rtio_cqe_pool_free()
767 size_t max_sz, uint8_t **buf, uint32_t *buf_len) in rtio_block_pool_alloc() argument
773 ARG_UNUSED(buf); in rtio_block_pool_alloc()
775 return -ENOTSUP; in rtio_block_pool_alloc()
780 /* Not every context has a block pool and the block size may return 0 in in rtio_block_pool_alloc()
784 return -ENOMEM; in rtio_block_pool_alloc()
789 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf); in rtio_block_pool_alloc()
800 bytes -= block_size; in rtio_block_pool_alloc()
803 return -ENOMEM; in rtio_block_pool_alloc()
807 static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len) in rtio_block_pool_free() argument
811 ARG_UNUSED(buf); in rtio_block_pool_free()
814 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift; in rtio_block_pool_free()
816 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks); in rtio_block_pool_free()
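
As a worked example of the sizing loop above (numbers illustrative): with 32-byte blocks, a request of min_sz = 20 and max_sz = 100 bytes first attempts a contiguous allocation of ceil(100 / 32) = 4 blocks, and on failure retries with the target shrunk by one block size (68, then 36 bytes) until an allocation succeeds or the target would drop below min_sz, at which point -ENOMEM is returned. A successful allocation always reports buf_len as a whole multiple of the block size.
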
821 /* clang-format off */
837 static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz]; \
842 .pool = CONCAT(_sqe_pool_, name), \
847 static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz]; \
852 .pool = CONCAT(_cqe_pool_, name), \
879 CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)]; \
881 CONCAT(_block_pool_, name), RTIO_DMEM)
885 (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT))) \
887 (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT))) \
889 IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),)) \
891 IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
905 * @param sq_sz Size of the submission queue entry pool
906 * @param cq_sz Size of the completion queue entry pool
909 Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz); \
910 Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz); \
911 Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool), \
912 &CONCAT(name, _cqe_pool), NULL)
914 /* clang-format on */
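
A minimal definition sketch for the plain variant (the name my_rtio is arbitrary); the two sizes set how many submission and completion entries are statically reserved for the context:

    #include <zephyr/rtio/rtio.h>

    /* 8 submission entries and 8 completion entries, statically allocated. */
    RTIO_DEFINE(my_rtio, 8, 8);

    /* The context is then passed by address, e.g. rtio_sqe_acquire(&my_rtio). */
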
920 * @param sq_sz Size of the submission queue, must be power of 2
921 * @param cq_sz Size of the completion queue, must be power of 2
932 /* clang-format on */
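
The mempool variant additionally reserves a block pool used by RTIO_SQE_MEMPOOL_BUFFER reads; a sketch (values and name are illustrative, and CONFIG_RTIO_SYS_MEM_BLOCKS must be enabled):

    #include <zephyr/rtio/rtio.h>

    /* 8 SQEs, 8 CQEs, plus 16 buffer blocks of 64 bytes aligned to 4 bytes. */
    RTIO_DEFINE_WITH_MEMPOOL(my_pool_rtio, 8, 8, 16, 64, 4);
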
943 return r->sqe_pool->pool_free; in rtio_sqe_acquirable()
956 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) { in rtio_txn_next()
957 return iodev_sqe->next; in rtio_txn_next()
974 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) { in rtio_chain_next()
975 return iodev_sqe->next; in rtio_chain_next()
991 return iodev_sqe->next; in rtio_iodev_sqe_next()
1004 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool); in rtio_sqe_acquire()
1010 mpsc_push(&r->sq, &iodev_sqe->q); in rtio_sqe_acquire()
1012 return &iodev_sqe->sqe; in rtio_sqe_acquire()
1023 struct mpsc_node *node = mpsc_pop(&r->sq); in rtio_sqe_drop_all()
1027 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe); in rtio_sqe_drop_all()
1028 node = mpsc_pop(&r->sq); in rtio_sqe_drop_all()
1037 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool); in rtio_cqe_acquire()
1053 mpsc_push(&r->cq, &cqe->q); in rtio_cqe_produce()
1073 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) { in rtio_cqe_consume()
1078 node = mpsc_pop(&r->cq); in rtio_cqe_consume()
1103 k_sem_take(r->consume_sem, K_FOREVER); in rtio_cqe_consume_block()
1105 node = mpsc_pop(&r->cq); in rtio_cqe_consume_block()
1108 node = mpsc_pop(&r->cq); in rtio_cqe_consume_block()
1123 rtio_cqe_pool_free(r->cqe_pool, cqe); in rtio_cqe_release()
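
A sketch of draining completions from the placeholder my_rtio context: rtio_cqe_consume() is non-blocking and returns NULL when nothing is pending, rtio_cqe_consume_block() waits, and every consumed CQE must be handed back with rtio_cqe_release():

    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_rtio; /* hypothetical context */

    void drain_completions_sketch(void)
    {
        struct rtio_cqe *cqe;

        while ((cqe = rtio_cqe_consume(&my_rtio)) != NULL) {
            if (cqe->result < 0) {
                /* e.g. -ECANCELED for a cancelled SQE */
            }

            /* cqe->userdata is whatever was passed at prep time. */

            rtio_cqe_release(&my_rtio, cqe);
        }
    }
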
1137 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) { in rtio_cqe_compute_flags()
1138 struct rtio *r = iodev_sqe->r; in rtio_cqe_compute_flags()
1139 struct sys_mem_blocks *mem_pool = r->block_pool; in rtio_cqe_compute_flags()
1140 int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >> in rtio_cqe_compute_flags()
1141 mem_pool->info.blk_sz_shift; in rtio_cqe_compute_flags()
1142 int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift; in rtio_cqe_compute_flags()
1165 * @return -EINVAL if the buffer wasn't allocated for this cqe
1166 * @return -ENOTSUP if memory blocks are disabled
1175 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) { in z_impl_rtio_cqe_get_mempool_buffer()
1176 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags); in z_impl_rtio_cqe_get_mempool_buffer()
1177 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags); in z_impl_rtio_cqe_get_mempool_buffer()
1180 *buff = r->block_pool->buffer + blk_idx * blk_size; in z_impl_rtio_cqe_get_mempool_buffer()
1182 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer); in z_impl_rtio_cqe_get_mempool_buffer()
1184 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks); in z_impl_rtio_cqe_get_mempool_buffer()
1187 return -EINVAL; in z_impl_rtio_cqe_get_mempool_buffer()
1194 return -ENOTSUP; in z_impl_rtio_cqe_get_mempool_buffer()
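
For reads submitted with RTIO_SQE_MEMPOOL_BUFFER, the buffer travels in the CQE flags as a block index and count; a retrieval sketch against the placeholder my_pool_rtio context:

    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_pool_rtio; /* hypothetical mempool-backed context */

    void consume_pool_read_sketch(void)
    {
        struct rtio_cqe *cqe = rtio_cqe_consume_block(&my_pool_rtio);
        uint8_t *buf;
        uint32_t buf_len;

        /* Only valid for mempool reads: -EINVAL otherwise, -ENOTSUP when
         * memory blocks support is disabled.
         */
        if (rtio_cqe_get_mempool_buffer(&my_pool_rtio, cqe, &buf, &buf_len) == 0) {
            /* ... use buf[0..buf_len) ... */

            /* Return the blocks to the pool once done with the data. */
            rtio_release_buffer(&my_pool_rtio, buf, buf_len);
        }

        rtio_cqe_release(&my_pool_rtio, cqe);
    }
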
1235 * @param result Integer result code (could be -errno)
1244 atomic_inc(&r->xcqcnt); in rtio_cqe_submit()
1246 cqe->result = result; in rtio_cqe_submit()
1247 cqe->userdata = userdata; in rtio_cqe_submit()
1248 cqe->flags = flags; in rtio_cqe_submit()
1252 atomic_inc(&r->cq_count); in rtio_cqe_submit()
1254 if (r->submit_count > 0) { in rtio_cqe_submit()
1255 r->submit_count--; in rtio_cqe_submit()
1256 if (r->submit_count == 0) { in rtio_cqe_submit()
1257 k_sem_give(r->submit_sem); in rtio_cqe_submit()
1262 k_sem_give(r->consume_sem); in rtio_cqe_submit()
1266 #define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1274 * @param[out] buf Where to store the pointer to the buffer
1275 * @param[out] buf_len Where to store the size of the buffer
1277 * @return 0 if @p buf and @p buf_len were successfully filled
1278 * @return -ENOMEM Not enough memory for @p min_buf_len
1281 uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len) in rtio_sqe_rx_buf() argument
1283 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe; in rtio_sqe_rx_buf()
1286 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) { in rtio_sqe_rx_buf()
1287 struct rtio *r = iodev_sqe->r; in rtio_sqe_rx_buf()
1289 if (sqe->rx.buf != NULL) { in rtio_sqe_rx_buf()
1290 if (sqe->rx.buf_len < min_buf_len) { in rtio_sqe_rx_buf()
1291 return -ENOMEM; in rtio_sqe_rx_buf()
1293 *buf = sqe->rx.buf; in rtio_sqe_rx_buf()
1294 *buf_len = sqe->rx.buf_len; in rtio_sqe_rx_buf()
1298 int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len); in rtio_sqe_rx_buf()
1300 sqe->rx.buf = *buf; in rtio_sqe_rx_buf()
1301 sqe->rx.buf_len = *buf_len; in rtio_sqe_rx_buf()
1305 return -ENOMEM; in rtio_sqe_rx_buf()
1311 if (sqe->rx.buf_len < min_buf_len) { in rtio_sqe_rx_buf()
1312 return -ENOMEM; in rtio_sqe_rx_buf()
1315 *buf = sqe->rx.buf; in rtio_sqe_rx_buf()
1316 *buf_len = sqe->rx.buf_len; in rtio_sqe_rx_buf()
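
On the iodev side, rtio_sqe_rx_buf() is what a submit handler calls to resolve the destination buffer, whether the application supplied one or requested a mempool allocation. A rough sketch of a hypothetical driver completion path (my_iodev_submit_sketch and the fixed 16-byte sample size are illustrative):

    #include <string.h>
    #include <zephyr/rtio/rtio.h>

    /* Hypothetical submit handler that produces a fixed-size sample. */
    static void my_iodev_submit_sketch(struct rtio_iodev_sqe *iodev_sqe)
    {
        const uint32_t needed = 16;
        uint8_t *buf;
        uint32_t buf_len;

        /* Resolves to the user buffer, or allocates from the context's block
         * pool when the SQE carries RTIO_SQE_MEMPOOL_BUFFER.
         */
        int rc = rtio_sqe_rx_buf(iodev_sqe, needed, needed, &buf, &buf_len);

        if (rc != 0) {
            rtio_iodev_sqe_err(iodev_sqe, rc);
            return;
        }

        memset(buf, 0xA5, buf_len); /* stand-in for real hardware I/O */

        rtio_iodev_sqe_ok(iodev_sqe, 0);
    }
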
1339 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) { in z_impl_rtio_release_buffer()
1359 k_object_access_grant(r->submit_sem, t); in rtio_access_grant()
1363 k_object_access_grant(r->consume_sem, t); in rtio_access_grant()
1370 * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
1384 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED; in z_impl_rtio_sqe_cancel()
1404 * @retval -ENOMEM not enough room in the queue
1417 return -ENOMEM; in z_impl_rtio_sqe_copy_in_get_handles()
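
Cancellation works on the handle returned when an SQE is copied into the queue; if the SQE has not started executing, it completes with -ECANCELED instead of running. A sketch, again with the placeholder my_rtio/my_iodev names:

    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_rtio;              /* hypothetical context */
    extern const struct rtio_iodev my_iodev; /* hypothetical device binding */

    static uint8_t rx_data[16];

    int cancelable_read_sketch(void)
    {
        struct rtio_sqe sqe;
        struct rtio_sqe *handle;

        rtio_sqe_prep_read(&sqe, &my_iodev, RTIO_PRIO_NORM, rx_data, sizeof(rx_data), NULL);

        /* Copies the SQE into the queue and hands back a handle to it;
         * fails with -ENOMEM when the SQE pool is exhausted.
         */
        int rc = rtio_sqe_copy_in_get_handles(&my_rtio, &sqe, &handle, 1);

        if (rc != 0) {
            return rc;
        }

        rtio_submit(&my_rtio, 0);

        /* Best-effort: only takes effect if the SQE is not already executing. */
        return rtio_sqe_cancel(handle);
    }
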
1446 * @retval -ENOMEM not enough room in the queue
1521 k_sem_reset(r->submit_sem); in z_impl_rtio_submit()
1522 r->submit_count = wait_count; in z_impl_rtio_submit()
1525 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count; in z_impl_rtio_submit()
1540 res = k_sem_take(r->submit_sem, K_FOREVER); in z_impl_rtio_submit()
1545 while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) { in z_impl_rtio_submit()
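
rtio_submit()'s wait_count argument blocks the caller until that many completions have been produced, via the submit semaphore or the cq_count polling loop shown above; a short sketch against the placeholder my_rtio context:

    #include <zephyr/rtio/rtio.h>

    extern struct rtio my_rtio; /* hypothetical context with SQEs already queued */

    void submit_and_wait_sketch(void)
    {
        /* Block until two completions are available, then drain them. */
        rtio_submit(&my_rtio, 2);

        for (int i = 0; i < 2; i++) {
            struct rtio_cqe *cqe = rtio_cqe_consume(&my_rtio);

            if (cqe == NULL) {
                break;
            }
            rtio_cqe_release(&my_rtio, cqe);
        }
    }
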