/*
 * Copyright (c) 2022 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Real-Time IO device API for moving bytes with low effort
 *
 * RTIO is a context for asynchronous batch operations using a submission and completion queue.
 *
 * Asynchronous I/O operations are set up in a submission queue. Each entry in the queue describes
 * the operation to perform with well-understood semantics.
 *
 * These operations may be chained in such a way that the next operation is executed only when
 * the current one is complete. If the current operation fails, all chained operations fail as
 * well.
 *
 * Operations may also be submitted as a transaction, where a set of operations is considered
 * to be one unit of work.
 *
 * The completion of these operations typically provides one or more completion queue events.
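 *
 * A minimal usage sketch; my_iodev is a stand-in for any concrete rtio_iodev, such as
 * one provided by an SPI, I2C, or sensor driver:
 *
 * @code{.c}
 * RTIO_DEFINE(my_rtio, 4, 4);
 *
 * void read_once(const struct rtio_iodev *my_iodev, uint8_t *buf, uint32_t len)
 * {
 *	struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *	rtio_sqe_prep_read(sqe, my_iodev, RTIO_PRIO_NORM, buf, len, NULL);
 *	rtio_submit(&my_rtio, 1);
 *
 *	struct rtio_cqe *cqe = rtio_cqe_consume_block(&my_rtio);
 *
 *	if (cqe->result < 0) {
 *		// handle the error here
 *	}
 *	rtio_cqe_release(&my_rtio, cqe);
 * }
 * @endcode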
 */

#ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
#define ZEPHYR_INCLUDE_RTIO_RTIO_H_

#include <string.h>

#include <zephyr/app_memory/app_memdomain.h>
#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/rtio/rtio_mpsc.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/mem_blocks.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/iterable_sections.h>

#ifdef __cplusplus
extern "C" {
#endif


/**
 * @brief RTIO
 * @defgroup rtio RTIO
 * @ingroup os_services
 * @{
 */

/**
 * @brief RTIO Predefined Priorities
 * @defgroup rtio_sqe_prio RTIO Priorities
 * @ingroup rtio
 * @{
 */

/**
 * @brief Low priority
 */
#define RTIO_PRIO_LOW 0U

/**
 * @brief Normal priority
 */
#define RTIO_PRIO_NORM 127U

/**
 * @brief High priority
 */
#define RTIO_PRIO_HIGH 255U

/**
 * @}
 */


/**
 * @brief RTIO SQE Flags
 * @defgroup rtio_sqe_flags RTIO SQE Flags
 * @ingroup rtio
 * @{
 */

/**
 * @brief The next request in the queue should wait on this one.
 *
 * Chained SQEs are individual units of work describing patterns of
 * ordering and failure cascading. A chained SQE must be started only
 * after the one before it completes. They are given to the iodevs one after another.
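 *
 * A sketch of chaining a write to a read; the context, iodev, and buffers are
 * placeholders assumed to be defined elsewhere:
 *
 * @code{.c}
 * struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 * struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 * rtio_sqe_prep_write(wr, my_iodev, RTIO_PRIO_NORM, wr_buf, wr_len, NULL);
 * wr->flags |= RTIO_SQE_CHAINED;
 * rtio_sqe_prep_read(rd, my_iodev, RTIO_PRIO_NORM, rd_buf, rd_len, NULL);
 *
 * rtio_submit(&my_rtio, 2);
 * @endcode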
 */
#define RTIO_SQE_CHAINED BIT(0)

/**
 * @brief The next request in the queue is part of a transaction.
 *
 * Transactional SQEs are sequential parts of a unit of work.
 * Only the first transactional SQE is submitted to an iodev, the
 * remaining SQEs are never individually submitted but instead considered
 * to be part of the transaction to the single iodev. The first sqe in the
 * sequence holds the iodev that will be used and the last holds the userdata
 * that will be returned in a single completion on failure/success.
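 *
 * A sketch of a write+read pair treated as one unit of work producing a single
 * completion; names are placeholders assumed to be defined elsewhere:
 *
 * @code{.c}
 * struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 * struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 * rtio_sqe_prep_write(wr, my_iodev, RTIO_PRIO_NORM, wr_buf, wr_len, NULL);
 * wr->flags |= RTIO_SQE_TRANSACTION;
 * rtio_sqe_prep_read(rd, my_iodev, RTIO_PRIO_NORM, rd_buf, rd_len, userdata);
 *
 * rtio_submit(&my_rtio, 1); // one CQE for the whole transaction
 * @endcode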
 */
#define RTIO_SQE_TRANSACTION BIT(1)


/**
 * @brief The buffer should be allocated by the RTIO mempool
 *
 * This flag can only exist if the CONFIG_RTIO_SYS_MEM_BLOCKS Kconfig was
 * enabled and the RTIO context was created via the RTIO_DEFINE_WITH_MEMPOOL()
 * macro. If set, the buffer associated with the entry was allocated by the
 * internal memory pool and should be released as soon as it is no longer
 * needed via a call to rtio_release_buffer().
 */
#define RTIO_SQE_MEMPOOL_BUFFER BIT(2)

/**
 * @brief The SQE should not execute if possible
 *
 * If possible (not yet executed), the SQE should be canceled by flagging it as failed and returning
 * -ECANCELED as the result.
 */
#define RTIO_SQE_CANCELED BIT(3)

/**
 * @brief The SQE should continue producing CQEs until canceled
 *
 * This flag must be used along with @ref RTIO_SQE_MEMPOOL_BUFFER and signals that when a read
 * is complete it should be placed back in the queue until canceled.
 */
#define RTIO_SQE_MULTISHOT BIT(4)

/**
 * @brief The SQE does not produce a CQE.
 */
#define RTIO_SQE_NO_RESPONSE BIT(5)

/**
 * @}
 */

/**
 * @brief RTIO CQE Flags
 * @defgroup rtio_cqe_flags RTIO CQE Flags
 * @ingroup rtio
 * @{
 */

/**
 * @brief The entry's buffer was allocated from the RTIO's mempool
 *
 * If this bit is set, the buffer was allocated from the memory pool and should be recycled as
 * soon as the application is done with it.
 */
#define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)

/**
 * @brief Get the flag portion (bits 0-7) of the CQE flags
 *
 * @param flags The CQE flags value
 * @return The flag bits of the flags field.
 */
#define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))

/**
 * @brief Get the block index from the CQE flags of a mempool buffer
 *
 * @param flags The CQE flags value
 * @return The block index portion of the flags field.
 */
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))

/**
 * @brief Get the block count from the CQE flags of a mempool buffer
 *
 * @param flags The CQE flags value
 * @return The block count portion of the flags field.
 */
#define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))

/**
 * @brief Prepare CQE flags for a mempool read.
 *
 * @param blk_idx The mempool block index
 * @param blk_cnt The mempool block count
 * @return A shifted and masked value that can be added to the flags field with an OR operator.
 */
#define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt) \
	(FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) | \
	 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))

/**
 * @}
 */

/**
 * @brief Equivalent to the I2C_MSG_STOP flag
 */
#define RTIO_IODEV_I2C_STOP BIT(0)

/**
 * @brief Equivalent to the I2C_MSG_RESTART flag
 */
#define RTIO_IODEV_I2C_RESTART BIT(1)

/**
 * @brief Equivalent to the I2C_MSG_ADDR_10_BITS flag
 */
#define RTIO_IODEV_I2C_10_BITS BIT(2)

/** @cond ignore */
struct rtio;
struct rtio_cqe;
struct rtio_sqe;
struct rtio_sqe_pool;
struct rtio_cqe_pool;
struct rtio_iodev;
struct rtio_iodev_sqe;
/** @endcond */

/**
 * @typedef rtio_callback_t
 * @brief Callback signature for RTIO_OP_CALLBACK
 * @param r RTIO context being used with the callback
 * @param sqe Submission for the callback op
 * @param arg0 Argument given as part of the sqe
 */
typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);

/**
 * @brief A submission queue event
 */
struct rtio_sqe {
	uint8_t op; /**< Op code */

	uint8_t prio; /**< Op priority */

	uint16_t flags; /**< Op Flags */

	uint16_t iodev_flags; /**< Op iodev flags */

	uint16_t _resv0;

	const struct rtio_iodev *iodev; /**< Device to operate on */

	/**
	 * User provided data which is returned upon operation completion. Could be a pointer or
	 * integer.
	 *
	 * If unique identification of completions is desired this should be
	 * unique as well.
	 */
	void *userdata;

	union {

		/** OP_TX, OP_RX */
		struct {
			uint32_t buf_len; /**< Length of buffer */
			uint8_t *buf; /**< Buffer to use */
		};

		/** OP_TINY_TX */
		struct {
			uint8_t tiny_buf_len; /**< Length of tiny buffer */
			uint8_t tiny_buf[7]; /**< Tiny buffer */
		};

		/** OP_CALLBACK */
		struct {
			rtio_callback_t callback;
			void *arg0; /**< Last argument given to callback */
		};

		/** OP_TXRX */
		struct {
			uint32_t txrx_buf_len;
			uint8_t *tx_buf;
			uint8_t *rx_buf;
		};

	};
};

/** @cond ignore */
/* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
/** @endcond */

/**
 * @brief A completion queue event
 */
struct rtio_cqe {
	struct rtio_mpsc_node q;

	int32_t result; /**< Result from operation */
	void *userdata; /**< Associated userdata with operation */
	uint32_t flags; /**< Flags associated with the operation */
};

struct rtio_sqe_pool {
	struct rtio_mpsc free_q;
	const uint16_t pool_size;
	uint16_t pool_free;
	struct rtio_iodev_sqe *pool;
};

struct rtio_cqe_pool {
	struct rtio_mpsc free_q;
	const uint16_t pool_size;
	uint16_t pool_free;
	struct rtio_cqe *pool;
};

/**
 * @brief An RTIO context containing what can be viewed as a pair of queues.
 *
 * A queue for submissions (available and in queue to be produced) as well as a queue
 * of completions (available and ready to be consumed).
 *
 * The rtio executor along with any objects implementing the rtio_iodev interface are
 * the consumers of submissions and producers of completions.
 *
 * No work is started until rtio_submit() is called.
 */
struct rtio {
#ifdef CONFIG_RTIO_SUBMIT_SEM
	/* A wait semaphore which may suspend the calling thread
	 * to wait for some number of completions when calling submit
	 */
	struct k_sem *submit_sem;

	uint32_t submit_count;
#endif

#ifdef CONFIG_RTIO_CONSUME_SEM
	/* A wait semaphore which may suspend the calling thread
	 * to wait for some number of completions while consuming
	 * them from the completion queue
	 */
	struct k_sem *consume_sem;
#endif

	/* Total number of completions */
	atomic_t cq_count;

	/* Number of completions that were unable to be submitted with results
	 * due to the completion queue being full
	 */
	atomic_t xcqcnt;

	/* Submission queue object pool with free list */
	struct rtio_sqe_pool *sqe_pool;

	/* Completion queue object pool with free list */
	struct rtio_cqe_pool *cqe_pool;

#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	/* Mem block pool */
	struct sys_mem_blocks *block_pool;
#endif

	/* Submission queue */
	struct rtio_mpsc sq;

	/* Completion queue */
	struct rtio_mpsc cq;
};

/** The memory partition associated with all RTIO context information */
extern struct k_mem_partition rtio_partition;

/**
 * @brief Get the mempool block size of the RTIO context
 *
 * @param[in] r The RTIO context
 * @return The size of each block in the context's mempool
 * @return 0 if the context doesn't have a mempool
 */
static inline size_t rtio_mempool_block_size(const struct rtio *r)
{
#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
	ARG_UNUSED(r);
	return 0;
#else
	if (r == NULL || r->block_pool == NULL) {
		return 0;
	}
	return BIT(r->block_pool->info.blk_sz_shift);
#endif
}

/**
 * @brief Compute the mempool block index for a given pointer
 *
 * @param[in] r RTIO context
 * @param[in] ptr Memory pointer in the mempool
 * @return Index of the mempool block associated with the pointer. Or UINT16_MAX if invalid.
 */
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
{
	uintptr_t addr = (uintptr_t)ptr;
	struct sys_mem_blocks *mem_pool = r->block_pool;
	uint32_t block_size = rtio_mempool_block_size(r);

	uintptr_t buff = (uintptr_t)mem_pool->buffer;
	uint32_t buff_size = mem_pool->info.num_blocks * block_size;

	if (addr < buff || addr >= buff + buff_size) {
		return UINT16_MAX;
	}
	return (addr - buff) / block_size;
}
#endif

/**
 * @brief IO device submission queue entry
 *
 * May be cast safely to and from an rtio_sqe as they occupy the same memory provided by the pool
 */
struct rtio_iodev_sqe {
	struct rtio_sqe sqe;
	struct rtio_mpsc_node q;
	struct rtio_iodev_sqe *next;
	struct rtio *r;
};

/**
 * @brief API that an RTIO IO device should implement
 */
struct rtio_iodev_api {
	/**
	 * @brief Submit to the iodev an entry to work on
	 *
	 * This call should be short in duration and most likely
	 * either enqueue or kick off an entry with the hardware.
	 *
	 * @param iodev_sqe Submission queue entry
	 */
	void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
};

/**
 * @brief An IO device with a function table for submitting requests
 */
struct rtio_iodev {
	/* Function pointer table */
	const struct rtio_iodev_api *api;

	/* Queue of RTIO contexts with requests */
	struct rtio_mpsc iodev_sq;

	/* Data associated with this iodev */
	void *data;
};

/** An operation that does nothing and will complete immediately */
#define RTIO_OP_NOP 0

/** An operation that receives (reads) */
#define RTIO_OP_RX (RTIO_OP_NOP+1)

/** An operation that transmits (writes) */
#define RTIO_OP_TX (RTIO_OP_RX+1)

/** An operation that transmits tiny writes by copying the data to write */
#define RTIO_OP_TINY_TX (RTIO_OP_TX+1)

/** An operation that calls a given function (callback) */
#define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)

/** An operation that transceives (reads and writes simultaneously) */
#define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)


/**
 * @brief Prepare a nop (no op) submission
 */
static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
				     const struct rtio_iodev *iodev,
				     void *userdata)
{
	memset(sqe, 0, sizeof(struct rtio_sqe));
	sqe->op = RTIO_OP_NOP;
	sqe->iodev = iodev;
	sqe->userdata = userdata;
}

/**
 * @brief Prepare a read op submission
 */
static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
				      const struct rtio_iodev *iodev,
				      int8_t prio,
				      uint8_t *buf,
				      uint32_t len,
				      void *userdata)
{
	memset(sqe, 0, sizeof(struct rtio_sqe));
	sqe->op = RTIO_OP_RX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->buf_len = len;
	sqe->buf = buf;
	sqe->userdata = userdata;
}

/**
 * @brief Prepare a read op submission with the context's mempool
 *
 * @see rtio_sqe_prep_read()
 */
static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
						const struct rtio_iodev *iodev, int8_t prio,
						void *userdata)
{
	rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
	sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
}

static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
						const struct rtio_iodev *iodev, int8_t prio,
						void *userdata)
{
	rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
	sqe->flags |= RTIO_SQE_MULTISHOT;
}

/**
 * @brief Prepare a write op submission
 */
static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
				       const struct rtio_iodev *iodev,
				       int8_t prio,
				       uint8_t *buf,
				       uint32_t len,
				       void *userdata)
{
	memset(sqe, 0, sizeof(struct rtio_sqe));
	sqe->op = RTIO_OP_TX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->buf_len = len;
	sqe->buf = buf;
	sqe->userdata = userdata;
}

/**
 * @brief Prepare a tiny write op submission
 *
 * Unlike the normal write operation, where the source buffer must outlive the call,
 * the tiny write data is copied into the sqe. It must be tiny enough to fit
 * within the fixed size of an rtio_sqe.
 *
 * This is useful in many scenarios, such as writing a register address to a device
 * before reading it back, where the small write buffer need not outlive the call.
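 *
 * A sketch of a register-address write chained to a read; the register value,
 * context, and iodev are placeholders:
 *
 * @code{.c}
 * uint8_t reg_addr = 0x0F;
 * struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 * struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 * // reg_addr is copied into the sqe, so it may live on the stack
 * rtio_sqe_prep_tiny_write(wr, my_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
 * wr->flags |= RTIO_SQE_CHAINED;
 * rtio_sqe_prep_read(rd, my_iodev, RTIO_PRIO_NORM, rd_buf, rd_len, NULL);
 * @endcode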
 */
static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
					    const struct rtio_iodev *iodev,
					    int8_t prio,
					    const uint8_t *tiny_write_data,
					    uint8_t tiny_write_len,
					    void *userdata)
{
	__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));

	memset(sqe, 0, sizeof(struct rtio_sqe));
	sqe->op = RTIO_OP_TINY_TX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->tiny_buf_len = tiny_write_len;
	memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
	sqe->userdata = userdata;
}

/**
 * @brief Prepare a callback op submission
 *
 * A somewhat special operation in that it may only be done in kernel mode.
 *
 * Used where general purpose logic is required in a queue of io operations to do
 * transforms or logic.
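 *
 * A sketch of a callback op; the callback body and its argument are placeholders:
 *
 * @code{.c}
 * void my_callback(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
 * {
 *	// transform data or kick off further work here
 * }
 *
 * struct rtio_sqe *cb = rtio_sqe_acquire(&my_rtio);
 *
 * rtio_sqe_prep_callback(cb, my_callback, cb_arg, NULL);
 * @endcode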
 */
static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
					  rtio_callback_t callback,
					  void *arg0,
					  void *userdata)
{
	memset(sqe, 0, sizeof(struct rtio_sqe));
	sqe->op = RTIO_OP_CALLBACK;
	sqe->prio = 0;
	sqe->iodev = NULL;
	sqe->callback = callback;
	sqe->arg0 = arg0;
	sqe->userdata = userdata;
}

/**
 * @brief Prepare a transceive op submission
 */
static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
					    const struct rtio_iodev *iodev,
					    int8_t prio,
					    uint8_t *tx_buf,
					    uint8_t *rx_buf,
					    uint32_t buf_len,
					    void *userdata)
{
	memset(sqe, 0, sizeof(struct rtio_sqe));
	sqe->op = RTIO_OP_TXRX;
	sqe->prio = prio;
	sqe->iodev = iodev;
	sqe->txrx_buf_len = buf_len;
	sqe->tx_buf = tx_buf;
	sqe->rx_buf = rx_buf;
	sqe->userdata = userdata;
}

static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
{
	struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);

	if (node == NULL) {
		return NULL;
	}

	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);

	pool->pool_free--;

	return iodev_sqe;
}

static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
{
	rtio_mpsc_push(&pool->free_q, &iodev_sqe->q);

	pool->pool_free++;
}

static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
{
	struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);

	if (node == NULL) {
		return NULL;
	}

	struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);

	memset(cqe, 0, sizeof(struct rtio_cqe));

	pool->pool_free--;

	return cqe;
}

static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
{
	rtio_mpsc_push(&pool->free_q, &cqe->q);

	pool->pool_free++;
}

static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
					size_t max_sz, uint8_t **buf, uint32_t *buf_len)
{
#ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
	ARG_UNUSED(r);
	ARG_UNUSED(min_sz);
	ARG_UNUSED(max_sz);
	ARG_UNUSED(buf);
	ARG_UNUSED(buf_len);
	return -ENOTSUP;
#else
	const uint32_t block_size = rtio_mempool_block_size(r);
	uint32_t bytes = max_sz;

	do {
		size_t num_blks = DIV_ROUND_UP(bytes, block_size);
		int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);

		if (rc == 0) {
			*buf_len = num_blks * block_size;
			return 0;
		}

		bytes -= block_size;
	} while (bytes >= min_sz);

	return -ENOMEM;
#endif
}

static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
{
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;

	sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
#endif
}

/* Do not try and reformat the macros */
/* clang-format off */

/**
 * @brief Statically define and initialize an RTIO IODev
 *
 * @param name Name of the iodev
 * @param iodev_api Pointer to struct rtio_iodev_api
 * @param iodev_data Data pointer
 */
#define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data) \
	STRUCT_SECTION_ITERABLE(rtio_iodev, name) = { \
		.api = (iodev_api), \
		.iodev_sq = RTIO_MPSC_INIT((name.iodev_sq)), \
		.data = (iodev_data), \
	}

#define Z_RTIO_SQE_POOL_DEFINE(name, sz) \
	static struct rtio_iodev_sqe _sqe_pool_##name[sz]; \
	STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = { \
		.free_q = RTIO_MPSC_INIT((name.free_q)), \
		.pool_size = sz, \
		.pool_free = sz, \
		.pool = _sqe_pool_##name, \
	}


#define Z_RTIO_CQE_POOL_DEFINE(name, sz) \
	static struct rtio_cqe _cqe_pool_##name[sz]; \
	STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = { \
		.free_q = RTIO_MPSC_INIT((name.free_q)), \
		.pool_size = sz, \
		.pool_free = sz, \
		.pool = _cqe_pool_##name, \
	}

/**
 * @brief Allocate to bss if available
 *
 * If CONFIG_USERSPACE is selected, allocate to the rtio_partition bss. Maps to:
 *   K_APP_BMEM(rtio_partition) static
 *
 * If CONFIG_USERSPACE is disabled, allocate as plain static:
 *   static
 */
#define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))

/**
 * @brief Allocate as initialized memory if available
 *
 * If CONFIG_USERSPACE is selected, allocate to the rtio_partition init. Maps to:
 *   K_APP_DMEM(rtio_partition) static
 *
 * If CONFIG_USERSPACE is disabled, allocate as plain static:
 *   static
 */
#define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))

#define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align) \
	RTIO_BMEM uint8_t __aligned(WB_UP(blk_align)) \
		_block_pool_##name[blk_cnt*WB_UP(blk_sz)]; \
	_SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt, _block_pool_##name, \
					    RTIO_DMEM)

#define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool) \
	IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, \
		   (static K_SEM_DEFINE(_submit_sem_##name, 0, K_SEM_MAX_LIMIT))) \
	IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, \
		   (static K_SEM_DEFINE(_consume_sem_##name, 0, K_SEM_MAX_LIMIT))) \
	STRUCT_SECTION_ITERABLE(rtio, name) = { \
		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &_submit_sem_##name,)) \
		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,)) \
		IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &_consume_sem_##name,)) \
		.cq_count = ATOMIC_INIT(0), \
		.xcqcnt = ATOMIC_INIT(0), \
		.sqe_pool = _sqe_pool, \
		.cqe_pool = _cqe_pool, \
		IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,)) \
		.sq = RTIO_MPSC_INIT((name.sq)), \
		.cq = RTIO_MPSC_INIT((name.cq)), \
	}

/**
 * @brief Statically define and initialize an RTIO context
 *
 * @param name Name of the RTIO context
 * @param sq_sz Size of the submission queue entry pool
 * @param cq_sz Size of the completion queue entry pool
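 *
 * A sketch of a context sized for four in-flight submissions and completions:
 *
 * @code{.c}
 * RTIO_DEFINE(my_rtio, 4, 4);
 * @endcode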
 */
#define RTIO_DEFINE(name, sq_sz, cq_sz) \
	Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
	Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
	Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, NULL)

/**
 * @brief Statically define and initialize an RTIO context with a memory pool
 *
 * @param name Name of the RTIO context
 * @param sq_sz Size of the submission queue entry pool
 * @param cq_sz Size of the completion queue entry pool
 * @param num_blks Number of blocks in the memory pool
 * @param blk_size The number of bytes in each block
 * @param balign The block alignment
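 *
 * A sketch of a context whose reads may draw from a 16-block pool of 32-byte
 * blocks; the iodev is a placeholder:
 *
 * @code{.c}
 * RTIO_DEFINE_WITH_MEMPOOL(my_rtio, 4, 4, 16, 32, 4);
 *
 * struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 * // the buffer is allocated from the pool when the read executes
 * rtio_sqe_prep_read_with_pool(sqe, my_iodev, RTIO_PRIO_NORM, NULL);
 * @endcode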
 */
#define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
	Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz); \
	Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz); \
	Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
	Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)

/* clang-format on */

/**
 * @brief Count of acquirable submission queue events
 *
 * @param r RTIO context
 *
 * @return Count of acquirable submission queue events
 */
static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
{
	return r->sqe_pool->pool_free;
}

/**
 * @brief Get the next sqe in the transaction
 *
 * @param iodev_sqe Submission queue entry
 *
 * @retval NULL if current sqe is last in transaction
 * @retval struct rtio_sqe * if available
 */
static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
{
	if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
		return iodev_sqe->next;
	} else {
		return NULL;
	}
}


/**
 * @brief Get the next sqe in the chain
 *
 * @param iodev_sqe Submission queue entry
 *
 * @retval NULL if current sqe is last in chain
 * @retval struct rtio_sqe * if available
 */
static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
{
	if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
		return iodev_sqe->next;
	} else {
		return NULL;
	}
}

/**
 * @brief Get the next sqe in the chain or transaction
 *
 * @param iodev_sqe Submission queue entry
 *
 * @retval NULL if current sqe is last in chain
 * @retval struct rtio_iodev_sqe * if available
 */
static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
{
	return iodev_sqe->next;
}

/**
 * @brief Acquire a single submission queue event if available
 *
 * @param r RTIO context
 *
 * @retval sqe A valid submission queue event acquired from the submission queue
 * @retval NULL No submission queue event available
 */
static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
{
	struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);

	if (iodev_sqe == NULL) {
		return NULL;
	}

	rtio_mpsc_push(&r->sq, &iodev_sqe->q);

	return &iodev_sqe->sqe;
}

/**
 * @brief Drop all previously acquired sqe
 *
 * @param r RTIO context
 */
static inline void rtio_sqe_drop_all(struct rtio *r)
{
	struct rtio_iodev_sqe *iodev_sqe;
	struct rtio_mpsc_node *node = rtio_mpsc_pop(&r->sq);

	while (node != NULL) {
		iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
		rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
		node = rtio_mpsc_pop(&r->sq);
	}
}

/**
 * @brief Acquire a completion queue event if available
 */
static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
{
	struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);

	if (cqe == NULL) {
		return NULL;
	}

	memset(cqe, 0, sizeof(struct rtio_cqe));

	return cqe;
}

/**
 * @brief Produce a completion queue event
 */
static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
{
	rtio_mpsc_push(&r->cq, &cqe->q);
}

/**
 * @brief Consume a single completion queue event if available
 *
 * If a completion queue event is returned rtio_cqe_release() must be called
 * at some point to release the cqe spot for the cqe producer.
 *
 * @param r RTIO context
 *
 * @retval cqe A valid completion queue event consumed from the completion queue
 * @retval NULL No completion queue event available
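 *
 * A sketch of draining every available completion; the userdata handling is a
 * placeholder:
 *
 * @code{.c}
 * struct rtio_cqe *cqe;
 *
 * while ((cqe = rtio_cqe_consume(&my_rtio)) != NULL) {
 *	// inspect cqe->result and cqe->userdata here
 *	rtio_cqe_release(&my_rtio, cqe);
 * }
 * @endcode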
 */
static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
{
	struct rtio_mpsc_node *node;
	struct rtio_cqe *cqe = NULL;

#ifdef CONFIG_RTIO_CONSUME_SEM
	if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
		return NULL;
	}
#endif

	node = rtio_mpsc_pop(&r->cq);
	if (node == NULL) {
		return NULL;
	}
	cqe = CONTAINER_OF(node, struct rtio_cqe, q);

	return cqe;
}

/**
 * @brief Wait for and consume a single completion queue event
 *
 * If a completion queue event is returned rtio_cqe_release() must be called
 * at some point to release the cqe spot for the cqe producer.
 *
 * @param r RTIO context
 *
 * @retval cqe A valid completion queue event consumed from the completion queue
 */
static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
{
	struct rtio_mpsc_node *node;
	struct rtio_cqe *cqe;

#ifdef CONFIG_RTIO_CONSUME_SEM
	k_sem_take(r->consume_sem, K_FOREVER);
#endif
	node = rtio_mpsc_pop(&r->cq);
	while (node == NULL) {
		Z_SPIN_DELAY(1);
		node = rtio_mpsc_pop(&r->cq);
	}
	cqe = CONTAINER_OF(node, struct rtio_cqe, q);

	return cqe;
}

/**
 * @brief Release consumed completion queue event
 *
 * @param r RTIO context
 * @param cqe Completion queue entry
 */
static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
{
	rtio_cqe_pool_free(r->cqe_pool, cqe);
}

/**
 * @brief Compute the CQE flags from the rtio_iodev_sqe entry
 *
 * @param iodev_sqe The SQE entry in question.
 * @return The value that should be set for the CQE's flags field.
 */
static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
{
	uint32_t flags = 0;

#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
		struct rtio *r = iodev_sqe->r;
		struct sys_mem_blocks *mem_pool = r->block_pool;
		int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) >>
				mem_pool->info.blk_sz_shift;
		int blk_count = iodev_sqe->sqe.buf_len >> mem_pool->info.blk_sz_shift;

		flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
	}
#else
	ARG_UNUSED(iodev_sqe);
#endif

	return flags;
}

/**
 * @brief Retrieve the mempool buffer that was allocated for the CQE.
 *
 * If the RTIO context contains a memory pool, and the SQE was created by calling
 * rtio_sqe_prep_read_with_pool(), this function can be used to retrieve the memory associated
 * with the read. Once processing is done, it should be released by calling rtio_release_buffer().
 *
 * @param[in] r RTIO context
 * @param[in] cqe The CQE handling the event.
 * @param[out] buff Pointer to the mempool buffer
 * @param[out] buff_len Length of the allocated buffer
 * @return 0 on success
 * @return -EINVAL if the buffer wasn't allocated for this cqe
 * @return -ENOTSUP if memory blocks are disabled
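 *
 * A sketch of the consume-and-release cycle for pool-backed reads; the context
 * is a placeholder defined with RTIO_DEFINE_WITH_MEMPOOL():
 *
 * @code{.c}
 * struct rtio_cqe *cqe = rtio_cqe_consume_block(&my_rtio);
 * uint8_t *buf;
 * uint32_t buf_len;
 *
 * if (rtio_cqe_get_mempool_buffer(&my_rtio, cqe, &buf, &buf_len) == 0) {
 *	// process buf[0..buf_len) here
 *	rtio_release_buffer(&my_rtio, buf, buf_len);
 * }
 * rtio_cqe_release(&my_rtio, cqe);
 * @endcode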
 */
__syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
					  uint8_t **buff, uint32_t *buff_len);

static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
						     uint8_t **buff, uint32_t *buff_len)
{
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
		int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
		int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
		uint32_t blk_size = rtio_mempool_block_size(r);

		*buff = r->block_pool->buffer + blk_idx * blk_size;
		*buff_len = blk_count * blk_size;
		__ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
		__ASSERT_NO_MSG(*buff <
				r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
		return 0;
	}
	return -EINVAL;
#else
	ARG_UNUSED(r);
	ARG_UNUSED(cqe);
	ARG_UNUSED(buff);
	ARG_UNUSED(buff_len);

	return -ENOTSUP;
#endif
}

void rtio_executor_submit(struct rtio *r);
void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);

/**
 * @brief Inform the executor of a submission completion with success
 *
 * This may start the next asynchronous request if one is available.
 *
 * @param iodev_sqe IODev submission that has succeeded
 * @param result Result of the request
 */
static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
{
	rtio_executor_ok(iodev_sqe, result);
}

/**
 * @brief Inform the executor of a submission completion with error
 *
 * This SHALL fail the remaining submissions in the chain.
 *
 * @param iodev_sqe Submission that has failed
 * @param result Result of the request
 */
static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
{
	rtio_executor_err(iodev_sqe, result);
}

/**
 * @brief Cancel all requests that are pending for the iodev
 *
 * @param iodev IODev to cancel all requests for
 */
static inline void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
{
	/* Fail pending requests with -ECANCELED */
	struct rtio_mpsc_node *node = rtio_mpsc_pop(&iodev->iodev_sq);

	while (node != NULL) {
		struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);

		rtio_iodev_sqe_err(iodev_sqe, -ECANCELED);
		node = rtio_mpsc_pop(&iodev->iodev_sq);
	}
}

/**
 * Submit a completion queue event with a given result and userdata
 *
 * Called by the executor to produce a completion queue event, no inherent
 * locking is performed and this is not safe to do from multiple callers.
 *
 * @param r RTIO context
 * @param result Integer result code (could be -errno)
 * @param userdata Userdata to pass along to completion
 * @param flags Flags to use for the CQE, see RTIO_CQE_FLAG_*
 */
static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
{
	struct rtio_cqe *cqe = rtio_cqe_acquire(r);

	if (cqe == NULL) {
		atomic_inc(&r->xcqcnt);
	} else {
		cqe->result = result;
		cqe->userdata = userdata;
		cqe->flags = flags;
		rtio_cqe_produce(r, cqe);
	}

	atomic_inc(&r->cq_count);
#ifdef CONFIG_RTIO_SUBMIT_SEM
	if (r->submit_count > 0) {
		r->submit_count--;
		if (r->submit_count == 0) {
			k_sem_give(r->submit_sem);
		}
	}
#endif
#ifdef CONFIG_RTIO_CONSUME_SEM
	k_sem_give(r->consume_sem);
#endif
}

#define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))

/**
 * @brief Get the buffer associated with the RX submission
 *
 * @param[in] iodev_sqe The submission to probe
 * @param[in] min_buf_len The minimum number of bytes needed for the operation
 * @param[in] max_buf_len The maximum number of bytes needed for the operation
 * @param[out] buf Where to store the pointer to the buffer
 * @param[out] buf_len Where to store the size of the buffer
 *
 * @return 0 if @p buf and @p buf_len were successfully filled
 * @return -ENOMEM Not enough memory for @p min_buf_len
 */
static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
				  uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
{
	struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;

#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
		struct rtio *r = iodev_sqe->r;

		if (sqe->buf != NULL) {
			if (sqe->buf_len < min_buf_len) {
				return -ENOMEM;
			}
			*buf = sqe->buf;
			*buf_len = sqe->buf_len;
			return 0;
		}

		int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
		if (rc == 0) {
			sqe->buf = *buf;
			sqe->buf_len = *buf_len;
			return 0;
		}

		return -ENOMEM;
	}
#else
	ARG_UNUSED(max_buf_len);
#endif

	if (sqe->buf_len < min_buf_len) {
		return -ENOMEM;
	}

	*buf = sqe->buf;
	*buf_len = sqe->buf_len;
	return 0;
}

/**
 * @brief Release memory that was allocated by the RTIO's memory pool
 *
 * If the RTIO context was created by a call to RTIO_DEFINE_WITH_MEMPOOL(), then the cqe data might
 * contain a buffer that's owned by the RTIO context. In those cases (if the read request was
 * configured via rtio_sqe_prep_read_with_pool()) the buffer must be returned back to the pool.
 *
 * Call this function when processing is complete. This function will validate that the memory
 * actually belongs to the RTIO context and will ignore invalid arguments.
 *
 * @param r RTIO context
 * @param buff Pointer to the buffer to be released.
 * @param buff_len Number of bytes to free (will be rounded up to nearest memory block).
 */
__syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);

static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
{
#ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
	if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
		return;
	}

	rtio_block_pool_free(r, buff, buff_len);
#else
	ARG_UNUSED(r);
	ARG_UNUSED(buff);
	ARG_UNUSED(buff_len);
#endif
}

/**
 * Grant access to an RTIO context to a user thread
 */
static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
{
	k_object_access_grant(r, t);

#ifdef CONFIG_RTIO_SUBMIT_SEM
	k_object_access_grant(r->submit_sem, t);
#endif

#ifdef CONFIG_RTIO_CONSUME_SEM
	k_object_access_grant(r->consume_sem, t);
#endif
}

/**
 * @brief Attempt to cancel an SQE
 *
 * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
 * result.
 *
 * @param[in] sqe The SQE to cancel
 * @return 0 if the SQE was flagged for cancellation
 * @return <0 on error
 */
__syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);

static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
{
	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);

	do {
		iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
		iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
	} while (iodev_sqe != NULL);

	return 0;
}

/**
 * @brief Copy an array of SQEs into the queue and get resulting handles back
 *
 * Copies one or more SQEs into the RTIO context and optionally returns their generated SQE
 * handles. Handles can be used to cancel events via the rtio_sqe_cancel() call.
 *
 * @param[in] r RTIO context
 * @param[in] sqes Pointer to an array of SQEs
 * @param[out] handle Optional pointer to @ref rtio_sqe pointer to store the handle of the
 *             first generated SQE. Use NULL to ignore.
 * @param[in] sqe_count Count of sqes in array
 *
 * @retval 0 success
 * @retval -ENOMEM not enough room in the queue
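 *
 * A sketch of copying in a chain built in local storage and keeping a handle
 * for later cancellation; names are placeholders:
 *
 * @code{.c}
 * struct rtio_sqe sqes[2];
 * struct rtio_sqe *handle;
 *
 * rtio_sqe_prep_write(&sqes[0], my_iodev, RTIO_PRIO_NORM, wr_buf, wr_len, NULL);
 * sqes[0].flags |= RTIO_SQE_CHAINED;
 * rtio_sqe_prep_read(&sqes[1], my_iodev, RTIO_PRIO_NORM, rd_buf, rd_len, NULL);
 *
 * if (rtio_sqe_copy_in_get_handles(&my_rtio, sqes, &handle, 2) == 0) {
 *	// handle points at the first copied SQE and may be passed to
 *	// rtio_sqe_cancel() before it executes
 * }
 * @endcode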
 */
__syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
					   struct rtio_sqe **handle, size_t sqe_count);

static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
						      struct rtio_sqe **handle,
						      size_t sqe_count)
{
	struct rtio_sqe *sqe;
	uint32_t acquirable = rtio_sqe_acquirable(r);

	if (acquirable < sqe_count) {
		return -ENOMEM;
	}

	for (unsigned long i = 0; i < sqe_count; i++) {
		sqe = rtio_sqe_acquire(r);
		__ASSERT_NO_MSG(sqe != NULL);
		if (handle != NULL && i == 0) {
			*handle = sqe;
		}
		*sqe = sqes[i];
	}

	return 0;
}

/**
 * @brief Copy an array of SQEs into the queue
 *
 * Useful if a batch of submissions is stored in ROM or
 * RTIO is used from user mode where a copy must be made.
 *
 * Partial copying is not done as chained SQEs need to be submitted
 * as a whole set.
 *
 * @param r RTIO context
 * @param sqes Pointer to an array of SQEs
 * @param sqe_count Count of sqes in array
 *
 * @retval 0 success
 * @retval -ENOMEM not enough room in the queue
 */
static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
{
	return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
}

/**
 * @brief Copy an array of CQEs from the queue
 *
 * Copies completion queue events from the RTIO context's completion queue,
 * waiting up to the given timeout per event to gather the number
 * of completions requested.
 *
 * @param r RTIO context
 * @param cqes Pointer to an array of CQEs
 * @param cqe_count Count of cqes in array
 * @param timeout Timeout to wait for each completion event. Total wait time is
 *                potentially timeout*cqe_count at maximum.
 *
 * @retval copy_count Count of copied CQEs (0 to cqe_count)
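 *
 * A sketch of gathering up to four completions with a bounded wait:
 *
 * @code{.c}
 * struct rtio_cqe cqes[4];
 * int n = rtio_cqe_copy_out(&my_rtio, cqes, 4, K_MSEC(100));
 *
 * for (int i = 0; i < n; i++) {
 *	// inspect cqes[i].result and cqes[i].userdata here
 * }
 * @endcode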
 */
__syscall int rtio_cqe_copy_out(struct rtio *r,
				struct rtio_cqe *cqes,
				size_t cqe_count,
				k_timeout_t timeout);

static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
					   struct rtio_cqe *cqes,
					   size_t cqe_count,
					   k_timeout_t timeout)
{
	size_t copied = 0;
	struct rtio_cqe *cqe;
	k_timepoint_t end = sys_timepoint_calc(timeout);

	do {
		cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
						       : rtio_cqe_consume(r);
		if (cqe == NULL) {
#ifdef CONFIG_BOARD_NATIVE_POSIX
			/* Native posix fakes the clock and only moves it forward when sleeping. */
			k_sleep(K_TICKS(1));
#else
			Z_SPIN_DELAY(1);
#endif
			continue;
		}
		cqes[copied++] = *cqe;
		rtio_cqe_release(r, cqe);
	} while (copied < cqe_count && !sys_timepoint_expired(end));

	return copied;
}

/**
 * @brief Submit I/O requests to the underlying executor
 *
 * Submits the queue of submission queue events to the executor.
 * The executor will do the work of managing tasks representing each
 * submission chain, freeing submission queue events when done, and
 * producing completion queue events as submissions are completed.
 *
 * @param r RTIO context
 * @param wait_count Number of submissions to wait for completion of.
 *
 * @retval 0 On success
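 *
 * A sketch of the two common wait modes:
 *
 * @code{.c}
 * rtio_submit(&my_rtio, 0); // kick off the queued work without waiting
 * rtio_submit(&my_rtio, 1); // submit and block until one completion arrives
 * @endcode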
 */
__syscall int rtio_submit(struct rtio *r, uint32_t wait_count);

static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
{
	int res = 0;

#ifdef CONFIG_RTIO_SUBMIT_SEM
	/* TODO undefined behavior if another thread calls submit of course
	 */
	if (wait_count > 0) {
		__ASSERT(!k_is_in_isr(),
			 "expected rtio submit with wait count to be called from a thread");

		k_sem_reset(r->submit_sem);
		r->submit_count = wait_count;
	}
#else
	uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
#endif

	/* Submit the queue to the executor which consumes submissions
	 * and produces completions through ISR chains or other means.
	 */
	rtio_executor_submit(r);


	/* TODO could be nicer if we could suspend the thread and not
	 * wake up on each completion here.
	 */
#ifdef CONFIG_RTIO_SUBMIT_SEM

	if (wait_count > 0) {
		res = k_sem_take(r->submit_sem, K_FOREVER);
		__ASSERT(res == 0,
			 "semaphore was reset or timed out while waiting on completions!");
	}
#else
	while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
		Z_SPIN_DELAY(10);
		k_yield();
	}
#endif

	return res;
}

/**
 * @}
 */

#ifdef __cplusplus
}
#endif

#include <syscalls/rtio.h>

#endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */