Lines matching refs:sqe — cross-reference hits for the identifier "sqe" in Zephyr's RTIO API header (rtio.h). Function signatures below appear truncated because only the lines that contain "sqe" are listed.

281 typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
493 struct rtio_sqe sqe; member (the SQE embedded in each struct rtio_iodev_sqe; see the CONTAINER_OF() use at line 1400)
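
Line 281 is the executor callback typedef and line 493 is the SQE embedded in struct rtio_iodev_sqe. A minimal sketch of a function matching rtio_callback_t; the name my_op_done and the counter are illustrative, not from the header:

    /* Matches rtio_callback_t: invoked by the executor for RTIO_OP_CALLBACK
     * SQEs, with arg0 taken from sqe->callback.arg0.
     */
    static void my_op_done(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
    {
        int *count = arg0;

        (*count)++;
    }
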
561 static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe, in rtio_sqe_prep_nop() argument
565 memset(sqe, 0, sizeof(struct rtio_sqe)); in rtio_sqe_prep_nop()
566 sqe->op = RTIO_OP_NOP; in rtio_sqe_prep_nop()
567 sqe->iodev = iodev; in rtio_sqe_prep_nop()
568 sqe->userdata = userdata; in rtio_sqe_prep_nop()
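
Lines 561-568 are rtio_sqe_prep_nop(), which zeroes the SQE and queues an operation that does nothing but produce a completion, handy for flushing or testing a queue. A minimal sketch; passing NULL for the iodev is an assumption here (the fragment only shows the iodev being stored), on the basis that the executor completes iodev-less NOPs itself:

    #include <zephyr/rtio/rtio.h>

    RTIO_DEFINE(ctx, 4, 4);   /* context with 4 SQEs and 4 CQEs; reused below */

    void submit_nop(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);  /* NULL when queue is full */

        rtio_sqe_prep_nop(sqe, NULL, NULL);  /* assumption: NULL iodev for a pure NOP */
        rtio_submit(&ctx, 1);                /* block until the one completion lands */
    }
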
574 static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe, in rtio_sqe_prep_read() argument
581 memset(sqe, 0, sizeof(struct rtio_sqe)); in rtio_sqe_prep_read()
582 sqe->op = RTIO_OP_RX; in rtio_sqe_prep_read()
583 sqe->prio = prio; in rtio_sqe_prep_read()
584 sqe->iodev = iodev; in rtio_sqe_prep_read()
585 sqe->rx.buf_len = len; in rtio_sqe_prep_read()
586 sqe->rx.buf = buf; in rtio_sqe_prep_read()
587 sqe->userdata = userdata; in rtio_sqe_prep_read()
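
rtio_sqe_prep_read() (lines 574-587) fills in an RX request against a caller-owned buffer; only the pointer is stored, so the buffer must outlive the operation. A sketch, where my_iodev stands for an iodev handle obtained from a driver (hypothetical name) and ctx is the context defined above:

    static uint8_t rx_data[32];   /* caller-owned; must stay valid until completion */

    void start_read(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);

        rtio_sqe_prep_read(sqe, my_iodev, RTIO_PRIO_NORM,
                           rx_data, sizeof(rx_data), rx_data /* userdata tag */);
        rtio_submit(&ctx, 1);
    }
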
595 static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe, in rtio_sqe_prep_read_with_pool() argument
599 rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata); in rtio_sqe_prep_read_with_pool()
600 sqe->flags = RTIO_SQE_MEMPOOL_BUFFER; in rtio_sqe_prep_read_with_pool()
603 static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe, in rtio_sqe_prep_read_multishot() argument
607 rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata); in rtio_sqe_prep_read_multishot()
608 sqe->flags |= RTIO_SQE_MULTISHOT; in rtio_sqe_prep_read_multishot()
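
The pool variant (595-600) delegates to prep_read() with NULL/0 and sets RTIO_SQE_MEMPOOL_BUFFER so the executor allocates the buffer from the context's memory pool; the multishot variant (603-608) additionally ORs in RTIO_SQE_MULTISHOT so the SQE resubmits itself after every completion. A sketch assuming a mempool-backed context; the block count/size/alignment arguments are illustrative:

    /* 8 blocks x 16 bytes, 4-byte aligned, backing RTIO_SQE_MEMPOOL_BUFFER reads */
    RTIO_DEFINE_WITH_MEMPOOL(pool_ctx, 4, 4, 8, 16, 4);

    void start_stream(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&pool_ctx);

        /* No buffer given: the executor allocates one per completion */
        rtio_sqe_prep_read_multishot(sqe, my_iodev, RTIO_PRIO_NORM, NULL);
        rtio_submit(&pool_ctx, 0);   /* don't wait; completions arrive repeatedly */
    }
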
614 static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe, in rtio_sqe_prep_write() argument
621 memset(sqe, 0, sizeof(struct rtio_sqe)); in rtio_sqe_prep_write()
622 sqe->op = RTIO_OP_TX; in rtio_sqe_prep_write()
623 sqe->prio = prio; in rtio_sqe_prep_write()
624 sqe->iodev = iodev; in rtio_sqe_prep_write()
625 sqe->tx.buf_len = len; in rtio_sqe_prep_write()
626 sqe->tx.buf = buf; in rtio_sqe_prep_write()
627 sqe->userdata = userdata; in rtio_sqe_prep_write()
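
rtio_sqe_prep_write() (614-627) mirrors the read path for TX. As with reads, the data is not copied, so the buffer must remain valid until completion. Sketch:

    static const uint8_t cmd[] = {0xA0, 0x01, 0xFF};  /* illustrative payload */

    void start_write(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);

        /* Only the pointer is stored in sqe->tx.buf; cmd[] must outlive the op */
        rtio_sqe_prep_write(sqe, my_iodev, RTIO_PRIO_NORM, cmd, sizeof(cmd), NULL);
        rtio_submit(&ctx, 1);
    }
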
640 static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe, in rtio_sqe_prep_tiny_write() argument
647 __ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf)); in rtio_sqe_prep_tiny_write()
649 memset(sqe, 0, sizeof(struct rtio_sqe)); in rtio_sqe_prep_tiny_write()
650 sqe->op = RTIO_OP_TINY_TX; in rtio_sqe_prep_tiny_write()
651 sqe->prio = prio; in rtio_sqe_prep_tiny_write()
652 sqe->iodev = iodev; in rtio_sqe_prep_tiny_write()
653 sqe->tiny_tx.buf_len = tiny_write_len; in rtio_sqe_prep_tiny_write()
654 memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len); in rtio_sqe_prep_tiny_write()
655 sqe->userdata = userdata; in rtio_sqe_prep_tiny_write()
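
Unlike prep_write(), the tiny variant (640-655) memcpy()s the payload into the SQE itself (line 654), bounded by the assert on line 647 against sizeof(sqe->tiny_tx.buf), so a short-lived stack buffer is safe. Sketch:

    void write_reg(uint8_t reg, uint8_t val)
    {
        uint8_t payload[2] = {reg, val};  /* copied into the SQE, may go out of scope */
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);

        rtio_sqe_prep_tiny_write(sqe, my_iodev, RTIO_PRIO_NORM,
                                 payload, sizeof(payload), NULL);
        rtio_submit(&ctx, 0);  /* fire and forget */
    }
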
666 static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe, in rtio_sqe_prep_callback() argument
671 memset(sqe, 0, sizeof(struct rtio_sqe)); in rtio_sqe_prep_callback()
672 sqe->op = RTIO_OP_CALLBACK; in rtio_sqe_prep_callback()
673 sqe->prio = 0; in rtio_sqe_prep_callback()
674 sqe->iodev = NULL; in rtio_sqe_prep_callback()
675 sqe->callback.callback = callback; in rtio_sqe_prep_callback()
676 sqe->callback.arg0 = arg0; in rtio_sqe_prep_callback()
677 sqe->userdata = userdata; in rtio_sqe_prep_callback()
690 static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe, in rtio_sqe_prep_callback_no_cqe() argument
695 rtio_sqe_prep_callback(sqe, callback, arg0, userdata); in rtio_sqe_prep_callback_no_cqe()
696 sqe->flags |= RTIO_SQE_NO_RESPONSE; in rtio_sqe_prep_callback_no_cqe()
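
prep_callback() (666-677) queues a function for the executor to invoke rather than an I/O op, and the no_cqe variant (690-696) ORs in RTIO_SQE_NO_RESPONSE so the callback consumes no completion-queue entry. A sketch reusing the my_op_done callback defined earlier:

    static int done_count;

    void queue_callback(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);

        /* Executor calls my_op_done(&ctx, sqe, &done_count); no CQE is produced */
        rtio_sqe_prep_callback_no_cqe(sqe, my_op_done, &done_count, NULL);
        rtio_submit(&ctx, 0);
    }
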
702 static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe, in rtio_sqe_prep_transceive() argument
710 memset(sqe, 0, sizeof(struct rtio_sqe)); in rtio_sqe_prep_transceive()
711 sqe->op = RTIO_OP_TXRX; in rtio_sqe_prep_transceive()
712 sqe->prio = prio; in rtio_sqe_prep_transceive()
713 sqe->iodev = iodev; in rtio_sqe_prep_transceive()
714 sqe->txrx.buf_len = buf_len; in rtio_sqe_prep_transceive()
715 sqe->txrx.tx_buf = tx_buf; in rtio_sqe_prep_transceive()
716 sqe->txrx.rx_buf = rx_buf; in rtio_sqe_prep_transceive()
717 sqe->userdata = userdata; in rtio_sqe_prep_transceive()
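
prep_transceive() (702-717) sets up a full-duplex TXRX op where a single buf_len covers both directions, the shape SPI-style iodevs expect. A sketch; the parameter order (tx_buf before rx_buf) is inferred from the field assignments above:

    static const uint8_t tx_buf[4] = {0x9F, 0x00, 0x00, 0x00};  /* illustrative */
    static uint8_t rx_buf[4];

    void start_transceive(void)
    {
        struct rtio_sqe *sqe = rtio_sqe_acquire(&ctx);

        /* 4 bytes shifted out while 4 bytes are shifted in */
        rtio_sqe_prep_transceive(sqe, my_iodev, RTIO_PRIO_NORM,
                                 tx_buf, rx_buf, sizeof(rx_buf), NULL);
        rtio_submit(&ctx, 1);
    }
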
956 if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) { in rtio_txn_next()
974 if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) { in rtio_chain_next()
1012 return &iodev_sqe->sqe; in rtio_sqe_acquire()
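
rtio_txn_next() and rtio_chain_next() (956, 974) advance to the following SQE when the TRANSACTION or CHAINED flag is set, and rtio_sqe_acquire() (1012) returns the sqe embedded in the next free rtio_iodev_sqe. From the application side the flags are what matter; a write-then-read chain sketch where the read starts only after the write succeeds:

    static uint8_t reg_addr = 0x10;   /* hypothetical register address */
    static uint8_t reg_val[4];

    void read_reg(void)
    {
        struct rtio_sqe *wr = rtio_sqe_acquire(&ctx);
        struct rtio_sqe *rd = rtio_sqe_acquire(&ctx);

        rtio_sqe_prep_tiny_write(wr, my_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
        wr->flags |= RTIO_SQE_CHAINED;    /* rd is held until wr completes OK */
        rtio_sqe_prep_read(rd, my_iodev, RTIO_PRIO_NORM,
                           reg_val, sizeof(reg_val), NULL);
        rtio_submit(&ctx, 2);             /* wait for both completions */
    }
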
1137 if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) { in rtio_cqe_compute_flags()
1143 if (iodev_sqe->sqe.rx.buf) { in rtio_cqe_compute_flags()
1144 blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >> in rtio_cqe_compute_flags()
1146 blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift; in rtio_cqe_compute_flags()
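
rtio_cqe_compute_flags() (1137-1146) packs the mempool block index and block count into the CQE flags for RTIO_OP_RX + RTIO_SQE_MEMPOOL_BUFFER completions; that encoding is how a consumer later locates the executor-chosen buffer. A consumer sketch using the public accessors from the same API, rtio_cqe_get_mempool_buffer_info() and rtio_release_buffer(), on the assumption they behave as named:

    void drain_one(void)
    {
        struct rtio_cqe *cqe = rtio_cqe_consume_block(&pool_ctx);  /* blocking */
        uint8_t *buf;
        uint32_t buf_len;

        if (cqe->result >= 0 &&
            rtio_cqe_get_mempool_buffer_info(&pool_ctx, cqe, &buf, &buf_len) == 0) {
            /* ... consume buf[0..buf_len) ... */
            rtio_release_buffer(&pool_ctx, buf, buf_len);  /* return blocks to pool */
        }
        rtio_cqe_release(&pool_ctx, cqe);
    }
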
1302 struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe; in rtio_sqe_rx_buf() local
1305 if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) { in rtio_sqe_rx_buf()
1308 if (sqe->rx.buf != NULL) { in rtio_sqe_rx_buf()
1309 if (sqe->rx.buf_len < min_buf_len) { in rtio_sqe_rx_buf()
1312 *buf = sqe->rx.buf; in rtio_sqe_rx_buf()
1313 *buf_len = sqe->rx.buf_len; in rtio_sqe_rx_buf()
1319 sqe->rx.buf = *buf; in rtio_sqe_rx_buf()
1320 sqe->rx.buf_len = *buf_len; in rtio_sqe_rx_buf()
1330 if (sqe->rx.buf_len < min_buf_len) { in rtio_sqe_rx_buf()
1334 *buf = sqe->rx.buf; in rtio_sqe_rx_buf()
1335 *buf_len = sqe->rx.buf_len; in rtio_sqe_rx_buf()
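
rtio_sqe_rx_buf() (1302-1335) is the driver-side buffer hook: for mempool SQEs it allocates (or re-validates an already attached) buffer and writes the result back into the SQE (1319-1320); for plain SQEs it only checks the caller's buffer meets the minimum length. A sketch of an iodev submit path with hypothetical names; the min/max arguments (16/64) are illustrative:

    static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
    {
        uint8_t *buf;
        uint32_t buf_len;
        int rc = rtio_sqe_rx_buf(iodev_sqe, 16, 64, &buf, &buf_len);

        if (rc != 0) {
            rtio_iodev_sqe_err(iodev_sqe, rc);   /* buffer too small or pool empty */
            return;
        }

        /* ... fill buf (e.g. kick off DMA), then on completion: */
        rtio_iodev_sqe_ok(iodev_sqe, (int)buf_len);
    }
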
1396 __syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1398 static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe) in z_impl_rtio_sqe_cancel() argument
1400 struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe); in z_impl_rtio_sqe_cancel()
1403 iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED; in z_impl_rtio_sqe_cancel()
1432 struct rtio_sqe *sqe; in z_impl_rtio_sqe_copy_in_get_handles() local
1440 sqe = rtio_sqe_acquire(r); in z_impl_rtio_sqe_copy_in_get_handles()
1441 __ASSERT_NO_MSG(sqe != NULL); in z_impl_rtio_sqe_copy_in_get_handles()
1443 *handle = sqe; in z_impl_rtio_sqe_copy_in_get_handles()
1445 *sqe = sqes[i]; in z_impl_rtio_sqe_copy_in_get_handles()
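
The copy-in path (1432-1445) acquires kernel-side SQEs and copies user-supplied descriptors into them; the *handle = sqe at 1443 stores a pointer to the first copied SQE (its i == 0 guard sits on a non-matching line). That handle is exactly what rtio_sqe_cancel() (1396-1403) takes: it recovers the containing rtio_iodev_sqe via CONTAINER_OF and sets RTIO_SQE_CANCELED, a best-effort flag, so an operation already in flight still completes. A sketch stopping a multishot read through its handle:

    void start_then_stop(void)
    {
        struct rtio_sqe sqe;
        struct rtio_sqe *handle;   /* receives a pointer to the copied-in SQE */

        rtio_sqe_prep_read_multishot(&sqe, my_iodev, RTIO_PRIO_NORM, NULL);
        rtio_sqe_copy_in_get_handles(&pool_ctx, &sqe, &handle, 1);
        rtio_submit(&pool_ctx, 0);

        /* ... later: best-effort stop; the flag keeps it from resubmitting */
        rtio_sqe_cancel(handle);
    }
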