Lines Matching +full:op +full:- +full:mode
4 * SPDX-License-Identifier: Apache-2.0
9 * @brief Real-Time IO device API for moving bytes with low effort
126 * -ECANCELED as the result.
231 * @brief I3C HDR Mode Mask
236 * @brief I3C HDR Mode Mask
242 * @brief I3C HDR Mode Mask
278 * @param sqe Submission for the callback op
287 uint8_t op; /**< Op code */
289 uint8_t prio; /**< Op priority */
291 uint16_t flags; /**< Op Flags */
293 uint32_t iodev_flags; /**< Op iodev flags */
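These four fields are normally filled in by the rtio_sqe_prep_* helpers listed further down; callers usually only OR extra bits into flags (or bump prio) after a prep call. The sketches added through the rest of this listing assume the minimal setup below, where the context sizes are arbitrary and my_iodev is a hypothetical handle that a real RTIO-aware driver would provide:

  #include <zephyr/rtio/rtio.h>

  /* Context with room for 8 in-flight submissions and 8 completions. */
  RTIO_DEFINE(r, 8, 8);

  /* Hypothetical iodev; a real handle comes from an RTIO-aware driver. */
  extern struct rtio_iodev my_iodev;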
456 if (r == NULL || r->block_pool == NULL) { in rtio_mempool_block_size()
459 return BIT(r->block_pool->info.blk_sz_shift); in rtio_mempool_block_size()
474 struct sys_mem_blocks *mem_pool = r->block_pool; in __rtio_compute_mempool_block_index()
477 uintptr_t buff = (uintptr_t)mem_pool->buffer; in __rtio_compute_mempool_block_index()
478 uint32_t buff_size = mem_pool->info.num_blocks * block_size; in __rtio_compute_mempool_block_index()
483 return (addr - buff) / block_size; in __rtio_compute_mempool_block_index()
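A worked sketch of that arithmetic, assuming a second, mempool-backed context whose numbers are chosen purely for illustration:

  /* 16 blocks of 32 bytes each, 4-byte aligned, so blk_sz_shift is 5. */
  RTIO_DEFINE_WITH_MEMPOOL(r_pool, 8, 8, 16, 32, 4);

  /*
   * rtio_mempool_block_size(&r_pool) == BIT(5) == 32 bytes, and a buffer that
   * starts 96 bytes into the pool's backing store maps to block index
   * 96 / 32 == 3.
   */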
559 * @brief Prepare a nop (no op) submission
566 sqe->op = RTIO_OP_NOP; in rtio_sqe_prep_nop()
567 sqe->iodev = iodev; in rtio_sqe_prep_nop()
568 sqe->userdata = userdata; in rtio_sqe_prep_nop()
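A minimal round trip using the setup above (NULL checks omitted): a NOP carries no data but still produces a completion, so its userdata pointer comes back on the CQE.

  void nop_roundtrip(void)
  {
          struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

          rtio_sqe_prep_nop(sqe, &my_iodev, (void *)0x1);
          rtio_submit(&r, 1);

          struct rtio_cqe *cqe = rtio_cqe_consume_block(&r);

          /* cqe->userdata == (void *)0x1; cqe->result holds the outcome */
          rtio_cqe_release(&r, cqe);
  }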
572 * @brief Prepare a read op submission
582 sqe->op = RTIO_OP_RX; in rtio_sqe_prep_read()
583 sqe->prio = prio; in rtio_sqe_prep_read()
584 sqe->iodev = iodev; in rtio_sqe_prep_read()
585 sqe->rx.buf_len = len; in rtio_sqe_prep_read()
586 sqe->rx.buf = buf; in rtio_sqe_prep_read()
587 sqe->userdata = userdata; in rtio_sqe_prep_read()
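The read prep only records the buffer pointer, so the memory must stay valid until the completion arrives. A sketch, reusing r and my_iodev from above:

  static uint8_t rx_data[16];

  void queue_read(void)
  {
          struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

          rtio_sqe_prep_read(sqe, &my_iodev, RTIO_PRIO_NORM, rx_data, sizeof(rx_data), rx_data);
          rtio_submit(&r, 0);     /* fire and forget; completion is consumed later */
  }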
591 * @brief Prepare a read op submission with context's mempool
600 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER; in rtio_sqe_prep_read_with_pool()
608 sqe->flags |= RTIO_SQE_MULTISHOT; in rtio_sqe_prep_read_multishot()
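With a mempool-backed context the buffer is allocated at execution time rather than supplied by the caller, and its location comes back through the CQE flags (see rtio_cqe_get_mempool_buffer() further down). A sketch, assuming the driver behind my_iodev supports mempool buffers:

  void queue_pool_read(void)
  {
          struct rtio_sqe *sqe = rtio_sqe_acquire(&r_pool);

          rtio_sqe_prep_read_with_pool(sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
          /* For a repeating read, rtio_sqe_prep_read_multishot() would also set
           * RTIO_SQE_MULTISHOT so the submission re-arms after each completion.
           */
          rtio_submit(&r_pool, 0);
  }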
612 * @brief Prepare a write op submission
622 sqe->op = RTIO_OP_TX; in rtio_sqe_prep_write()
623 sqe->prio = prio; in rtio_sqe_prep_write()
624 sqe->iodev = iodev; in rtio_sqe_prep_write()
625 sqe->tx.buf_len = len; in rtio_sqe_prep_write()
626 sqe->tx.buf = buf; in rtio_sqe_prep_write()
627 sqe->userdata = userdata; in rtio_sqe_prep_write()
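Writes mirror reads: only the pointer is stored, so the data must outlive the submission. A sketch:

  static uint8_t tx_data[] = {0xDE, 0xAD, 0xBE, 0xEF};

  void queue_write(void)
  {
          struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

          rtio_sqe_prep_write(sqe, &my_iodev, RTIO_PRIO_NORM, tx_data, sizeof(tx_data), NULL);
          rtio_submit(&r, 1);
  }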
631 * @brief Prepare a tiny write op submission
647 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf)); in rtio_sqe_prep_tiny_write()
650 sqe->op = RTIO_OP_TINY_TX; in rtio_sqe_prep_tiny_write()
651 sqe->prio = prio; in rtio_sqe_prep_tiny_write()
652 sqe->iodev = iodev; in rtio_sqe_prep_tiny_write()
653 sqe->tiny_tx.buf_len = tiny_write_len; in rtio_sqe_prep_tiny_write()
654 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len); in rtio_sqe_prep_tiny_write()
655 sqe->userdata = userdata; in rtio_sqe_prep_tiny_write()
659 * @brief Prepare a callback op submission
661 * A somewhat special operation: it may only be used from kernel mode.
672 sqe->op = RTIO_OP_CALLBACK; in rtio_sqe_prep_callback()
673 sqe->prio = 0; in rtio_sqe_prep_callback()
674 sqe->iodev = NULL; in rtio_sqe_prep_callback()
675 sqe->callback.callback = callback; in rtio_sqe_prep_callback()
676 sqe->callback.arg0 = arg0; in rtio_sqe_prep_callback()
677 sqe->userdata = userdata; in rtio_sqe_prep_callback()
681 * @brief Prepare a callback op submission that does not create a CQE
696 sqe->flags |= RTIO_SQE_NO_RESPONSE; in rtio_sqe_prep_callback_no_cqe()
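A sketch of both callback variants, assuming the rtio_callback_t signature of (context, submission, arg0) implied by the @param documentation above; the callback runs in kernel mode when the submission is executed:

  static void on_done(struct rtio *ctx, const struct rtio_sqe *sqe, void *arg0)
  {
          /* executes in kernel mode when this SQE is reached */
  }

  void queue_callback(void)
  {
          struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

          rtio_sqe_prep_callback(sqe, on_done, NULL, NULL);
          /* rtio_sqe_prep_callback_no_cqe() is identical except that it sets
           * RTIO_SQE_NO_RESPONSE, so no completion-queue entry is produced.
           */
          rtio_submit(&r, 0);
  }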
700 * @brief Prepare a transceive op submission
711 sqe->op = RTIO_OP_TXRX; in rtio_sqe_prep_transceive()
712 sqe->prio = prio; in rtio_sqe_prep_transceive()
713 sqe->iodev = iodev; in rtio_sqe_prep_transceive()
714 sqe->txrx.buf_len = buf_len; in rtio_sqe_prep_transceive()
715 sqe->txrx.tx_buf = tx_buf; in rtio_sqe_prep_transceive()
716 sqe->txrx.rx_buf = rx_buf; in rtio_sqe_prep_transceive()
717 sqe->userdata = userdata; in rtio_sqe_prep_transceive()
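Transceive is the full-duplex case (e.g. SPI): a single length covers both buffers, which are clocked out and in together. A sketch, assuming my_iodev is such a full-duplex device:

  static uint8_t spi_tx[4] = {0x0F, 0, 0, 0};
  static uint8_t spi_rx[4];

  void queue_transceive(void)
  {
          struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

          rtio_sqe_prep_transceive(sqe, &my_iodev, RTIO_PRIO_NORM, spi_tx, spi_rx,
                                   sizeof(spi_rx), NULL);
          rtio_submit(&r, 1);
  }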
722 struct mpsc_node *node = mpsc_pop(&pool->free_q); in rtio_sqe_pool_alloc()
730 pool->pool_free--; in rtio_sqe_pool_alloc()
737 mpsc_push(&pool->free_q, &iodev_sqe->q); in rtio_sqe_pool_free()
739 pool->pool_free++; in rtio_sqe_pool_free()
744 struct mpsc_node *node = mpsc_pop(&pool->free_q); in rtio_cqe_pool_alloc()
754 pool->pool_free--; in rtio_cqe_pool_alloc()
761 mpsc_push(&pool->free_q, &cqe->q); in rtio_cqe_pool_free()
763 pool->pool_free++; in rtio_cqe_pool_free()
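The SQE and CQE pools are sized once at RTIO_DEFINE() time, so exhaustion is a normal condition the caller has to handle; rtio_sqe_acquire() simply returns NULL when the free queue is empty:

  struct rtio_sqe *sqe = rtio_sqe_acquire(&r);

  if (sqe == NULL) {
          /* Pool exhausted: submit what is already queued and/or consume and
           * release completions before trying to acquire again.
           */
  }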
775 return -ENOTSUP; in rtio_block_pool_alloc()
784 return -ENOMEM; in rtio_block_pool_alloc()
789 int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf); in rtio_block_pool_alloc()
800 bytes -= block_size; in rtio_block_pool_alloc()
803 return -ENOMEM; in rtio_block_pool_alloc()
814 size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift; in rtio_block_pool_free()
816 sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks); in rtio_block_pool_free()
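A worked note on the allocation loop above, using the illustrative r_pool numbers from earlier (32-byte blocks): the request is served in whole contiguous blocks, starting from the largest size that covers max_buf_len and stepping down one block at a time.

  /*
   * With 32-byte blocks, a request of min_buf_len = 20 and max_buf_len = 100
   * first tries ceil(100 / 32) = 4 contiguous blocks; if the pool is too
   * fragmented it retries with progressively fewer blocks until the result
   * would fall below the 20-byte minimum, at which point it reports -ENOMEM.
   */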
943 return r->sqe_pool->pool_free; in rtio_sqe_acquirable()
956 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) { in rtio_txn_next()
957 return iodev_sqe->next; in rtio_txn_next()
974 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) { in rtio_chain_next()
975 return iodev_sqe->next; in rtio_chain_next()
991 return iodev_sqe->next; in rtio_iodev_sqe_next()
1004 struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool); in rtio_sqe_acquire()
1010 mpsc_push(&r->sq, &iodev_sqe->q); in rtio_sqe_acquire()
1012 return &iodev_sqe->sqe; in rtio_sqe_acquire()
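The chain and transaction flags are what make rtio_chain_next()/rtio_txn_next() meaningful: a chained SQE only starts after the previous one succeeds, while RTIO_SQE_TRANSACTION marks submissions the iodev must handle as one unit. A classic register-read sketch (NULL checks omitted):

  static uint8_t reg_val[2];

  void queue_reg_read(uint8_t reg)
  {
          struct rtio_sqe *wr = rtio_sqe_acquire(&r);
          struct rtio_sqe *rd = rtio_sqe_acquire(&r);

          rtio_sqe_prep_tiny_write(wr, &my_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
          wr->flags |= RTIO_SQE_CHAINED;  /* rd runs only after wr succeeds */
          rtio_sqe_prep_read(rd, &my_iodev, RTIO_PRIO_NORM, reg_val, sizeof(reg_val), NULL);
          rtio_submit(&r, 2);
  }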
1023 struct mpsc_node *node = mpsc_pop(&r->sq); in rtio_sqe_drop_all()
1027 rtio_sqe_pool_free(r->sqe_pool, iodev_sqe); in rtio_sqe_drop_all()
1028 node = mpsc_pop(&r->sq); in rtio_sqe_drop_all()
1037 struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool); in rtio_cqe_acquire()
1053 mpsc_push(&r->cq, &cqe->q); in rtio_cqe_produce()
1073 if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) { in rtio_cqe_consume()
1078 node = mpsc_pop(&r->cq); in rtio_cqe_consume()
1103 k_sem_take(r->consume_sem, K_FOREVER); in rtio_cqe_consume_block()
1105 node = mpsc_pop(&r->cq); in rtio_cqe_consume_block()
1108 node = mpsc_pop(&r->cq); in rtio_cqe_consume_block()
1123 rtio_cqe_pool_free(r->cqe_pool, cqe); in rtio_cqe_release()
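The two consume variants differ only in blocking behaviour; either way, every consumed CQE must be handed back with rtio_cqe_release() so its pool entry can be reused. A non-blocking drain loop sketch:

  void drain_completions(void)
  {
          struct rtio_cqe *cqe = rtio_cqe_consume(&r);

          while (cqe != NULL) {
                  if (cqe->result < 0) {
                          /* the operation failed; cqe->userdata says which one */
                  }
                  rtio_cqe_release(&r, cqe);
                  cqe = rtio_cqe_consume(&r);
          }
  }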
1137 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) { in rtio_cqe_compute_flags()
1138 struct rtio *r = iodev_sqe->r; in rtio_cqe_compute_flags()
1139 struct sys_mem_blocks *mem_pool = r->block_pool; in rtio_cqe_compute_flags()
1140 int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >> in rtio_cqe_compute_flags()
1141 mem_pool->info.blk_sz_shift; in rtio_cqe_compute_flags()
1142 int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift; in rtio_cqe_compute_flags()
1165 * @return -EINVAL if the buffer wasn't allocated for this cqe
1166 * @return -ENOTSUP if memory blocks are disabled
1175 if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) { in z_impl_rtio_cqe_get_mempool_buffer()
1176 int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags); in z_impl_rtio_cqe_get_mempool_buffer()
1177 int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags); in z_impl_rtio_cqe_get_mempool_buffer()
1180 *buff = r->block_pool->buffer + blk_idx * blk_size; in z_impl_rtio_cqe_get_mempool_buffer()
1182 __ASSERT_NO_MSG(*buff >= r->block_pool->buffer); in z_impl_rtio_cqe_get_mempool_buffer()
1184 r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks); in z_impl_rtio_cqe_get_mempool_buffer()
1187 return -EINVAL; in z_impl_rtio_cqe_get_mempool_buffer()
1194 return -ENOTSUP; in z_impl_rtio_cqe_get_mempool_buffer()
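Putting the two halves together for a mempool read on r_pool: the block index and count travel in the CQE flags, the helper turns them back into a pointer and length, and the buffer must be returned with rtio_release_buffer() once consumed.

  void consume_pool_read(void)
  {
          struct rtio_cqe *cqe = rtio_cqe_consume_block(&r_pool);
          uint8_t *buf;
          uint32_t buf_len;

          if (cqe->result >= 0 &&
              rtio_cqe_get_mempool_buffer(&r_pool, cqe, &buf, &buf_len) == 0) {
                  /* ... use buf[0 .. buf_len) ... */
                  rtio_release_buffer(&r_pool, buf, buf_len);
          }
          rtio_cqe_release(&r_pool, cqe);
  }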
1235 * @param result Integer result code (may be a negative errno value)
1244 atomic_inc(&r->xcqcnt); in rtio_cqe_submit()
1246 cqe->result = result; in rtio_cqe_submit()
1247 cqe->userdata = userdata; in rtio_cqe_submit()
1248 cqe->flags = flags; in rtio_cqe_submit()
1252 atomic_inc(&r->cq_count); in rtio_cqe_submit()
1254 if (r->submit_count > 0) { in rtio_cqe_submit()
1255 r->submit_count--; in rtio_cqe_submit()
1256 if (r->submit_count == 0) { in rtio_cqe_submit()
1257 k_sem_give(r->submit_sem); in rtio_cqe_submit()
1262 k_sem_give(r->consume_sem); in rtio_cqe_submit()
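rtio_cqe_submit() is the completion side that iodev implementations normally reach indirectly, by reporting back through rtio_iodev_sqe_ok()/rtio_iodev_sqe_err(). A hypothetical, trivially completing iodev submit handler might look like:

  static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
  {
          switch (iodev_sqe->sqe.op) {
          case RTIO_OP_TX:
                  /* ... start the transfer; on completion report success ... */
                  rtio_iodev_sqe_ok(iodev_sqe, 0);
                  break;
          default:
                  rtio_iodev_sqe_err(iodev_sqe, -ENOTSUP);
                  break;
          }
  }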
1266 #define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1278 * @return -ENOMEM Not enough memory for @p min_buf_len
1283 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe; in rtio_sqe_rx_buf()
1286 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) { in rtio_sqe_rx_buf()
1287 struct rtio *r = iodev_sqe->r; in rtio_sqe_rx_buf()
1289 if (sqe->rx.buf != NULL) { in rtio_sqe_rx_buf()
1290 if (sqe->rx.buf_len < min_buf_len) { in rtio_sqe_rx_buf()
1291 return -ENOMEM; in rtio_sqe_rx_buf()
1293 *buf = sqe->rx.buf; in rtio_sqe_rx_buf()
1294 *buf_len = sqe->rx.buf_len; in rtio_sqe_rx_buf()
1300 sqe->rx.buf = *buf; in rtio_sqe_rx_buf()
1301 sqe->rx.buf_len = *buf_len; in rtio_sqe_rx_buf()
1305 return -ENOMEM; in rtio_sqe_rx_buf()
1311 if (sqe->rx.buf_len < min_buf_len) { in rtio_sqe_rx_buf()
1312 return -ENOMEM; in rtio_sqe_rx_buf()
1315 *buf = sqe->rx.buf; in rtio_sqe_rx_buf()
1316 *buf_len = sqe->rx.buf_len; in rtio_sqe_rx_buf()
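Seen from the iodev side, this helper is how a driver obtains its receive buffer for an RTIO_OP_RX submission, whether the SQE carried a fixed buffer or a mempool request; the min/max values here are illustrative and would reflect what the hardware needs and can produce:

  static void my_iodev_handle_rx(struct rtio_iodev_sqe *iodev_sqe)
  {
          uint8_t *buf;
          uint32_t buf_len;

          if (rtio_sqe_rx_buf(iodev_sqe, 16, 64, &buf, &buf_len) != 0) {
                  rtio_iodev_sqe_err(iodev_sqe, -ENOMEM);
                  return;
          }
          /* ... fill buf, then rtio_iodev_sqe_ok(iodev_sqe, 0) ... */
  }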
1339 if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) { in z_impl_rtio_release_buffer()
1359 k_object_access_grant(r->submit_sem, t); in rtio_access_grant()
1363 k_object_access_grant(r->consume_sem, t); in rtio_access_grant()
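Before a user-mode thread may call the syscalls above, it needs access to the context's kernel objects. A small sketch:

  void grant_rtio(struct k_thread *t)
  {
          /* grants the context plus its submit/consume semaphores to t */
          rtio_access_grant(&r, t);
  }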
1370 * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
1384 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED; in z_impl_rtio_sqe_cancel()
1404 * @retval -ENOMEM not enough room in the queue
1417 return -ENOMEM; in z_impl_rtio_sqe_copy_in_get_handles()
1436 * RTIO is used from user mode where a copy must be made.
1446 * @retval -ENOMEM not enough room in the queue
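From user mode the SQE is staged in caller memory and copied into the context; keeping the returned handle allows a later best-effort cancel (the flag is simply set, so an SQE that is already executing still completes). A sketch on the mempool context, assuming access was granted as above:

  void user_read_then_cancel(void)
  {
          struct rtio_sqe sqe;
          struct rtio_iodev_sqe *handle;

          rtio_sqe_prep_read_with_pool(&sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
          if (rtio_sqe_copy_in_get_handles(&r_pool, &sqe, &handle, 1) == 0) {
                  rtio_submit(&r_pool, 0);
                  /* ... later, if the result is no longer needed ... */
                  rtio_sqe_cancel(&handle->sqe);
          }
  }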
1521 k_sem_reset(r->submit_sem); in z_impl_rtio_submit()
1522 r->submit_count = wait_count; in z_impl_rtio_submit()
1525 uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count; in z_impl_rtio_submit()
1540 res = k_sem_take(r->submit_sem, K_FOREVER); in z_impl_rtio_submit()
1545 while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) { in z_impl_rtio_submit()
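The wait_count argument turns rtio_submit() into a batched, blocking call: it returns once that many completions have been produced, so the matching CQEs are already available when consumption starts. A sketch:

  void submit_and_wait(uint32_t queued)
  {
          rtio_submit(&r, queued);        /* blocks until `queued` CQEs exist */

          for (uint32_t i = 0; i < queued; i++) {
                  struct rtio_cqe *cqe = rtio_cqe_consume_block(&r);

                  /* already produced, so this returns promptly */
                  rtio_cqe_release(&r, cqe);
          }
  }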