1 /*
2  * Copyright (c) 2022 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Real-Time IO device API for moving bytes with low effort
10  *
11  * RTIO is a context for asynchronous batch operations using a submission and completion queue.
12  *
13  * Asynchronous I/O operations are set up in a submission queue. Each entry in the queue describes
14  * the operation to perform with some well understood semantics.
15  *
16  * These operations may be chained in such a way that the next operation is executed
17  * only once the current one completes. If the current operation fails,
18  * all chained operations will also fail.
19  *
20  * Operations may also be submitted as a transaction where a set of operations are considered
21  * to be one operation.
22  *
23  * The completion of these operations typically provides one or more completion queue events.
24  */
25 
26 #ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27 #define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28 
29 #include <string.h>
30 
31 #include <zephyr/app_memory/app_memdomain.h>
32 #include <zephyr/device.h>
33 #include <zephyr/kernel.h>
34 #include <zephyr/rtio/rtio_mpsc.h>
35 #include <zephyr/sys/__assert.h>
36 #include <zephyr/sys/atomic.h>
37 #include <zephyr/sys/mem_blocks.h>
38 #include <zephyr/sys/util.h>
39 #include <zephyr/sys/iterable_sections.h>
40 
41 #ifdef __cplusplus
42 extern "C" {
43 #endif
44 
45 
46 /**
47  * @brief RTIO
48  * @defgroup rtio RTIO
49  * @ingroup os_services
50  * @{
51  * @}
52  */
53 
54 /**
55  * @brief RTIO API
56  * @defgroup rtio_api RTIO API
57  * @ingroup rtio
58  * @{
59  */
60 
61 /**
62  * @brief RTIO Predefined Priorities
63  * @defgroup rtio_sqe_prio RTIO Priorities
64  * @ingroup rtio_api
65  * @{
66  */
67 
68 /**
69  * @brief Low priority
70  */
71 #define RTIO_PRIO_LOW 0U
72 
73 /**
74  * @brief Normal priority
75  */
76 #define RTIO_PRIO_NORM 127U
77 
78 /**
79  * @brief High priority
80  */
81 #define RTIO_PRIO_HIGH 255U
82 
83 /**
84  * @}
85  */
86 
87 
88 /**
89  * @brief RTIO SQE Flags
90  * @defgroup rtio_sqe_flags RTIO SQE Flags
91  * @ingroup rtio_api
92  * @{
93  */
94 
95 /**
96  * @brief The next request in the queue should wait on this one.
97  *
98  * Chained SQEs are individual units of work describing patterns of
99  * ordering and failure cascading. A chained SQE must be started only
100  * after the one before it. They are given to the iodevs one after another.
101  */
102 #define RTIO_SQE_CHAINED BIT(0)
103 
104 /**
105  * @brief The next request in the queue is part of a transaction.
106  *
107  * Transactional SQEs are sequential parts of a unit of work.
108  * Only the first transactional SQE is submitted to an iodev; the
109  * remaining SQEs are never individually submitted but are instead considered
110  * to be part of the transaction to the single iodev. The first sqe in the
111  * sequence holds the iodev that will be used and the last holds the userdata
112  * that will be returned in a single completion on failure/success.
113  */
114 #define RTIO_SQE_TRANSACTION BIT(1)
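
/*
 * Illustrative sketch (not part of the API): building a chain where a write must
 * complete before a read, versus a transaction where both SQEs form one unit of
 * work. The names my_rtio, my_iodev, tx_buf, rx_buf and userdata are hypothetical
 * and assumed to have been created elsewhere (e.g. via RTIO_DEFINE() and
 * RTIO_IODEV_DEFINE()).
 *
 *   struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 *   struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_write(wr, &my_iodev, RTIO_PRIO_NORM, tx_buf, sizeof(tx_buf), NULL);
 *   wr->flags |= RTIO_SQE_CHAINED;    // rd starts only after wr succeeds
 *   rtio_sqe_prep_read(rd, &my_iodev, RTIO_PRIO_NORM, rx_buf, sizeof(rx_buf), userdata);
 *
 *   // For a transaction, set RTIO_SQE_TRANSACTION on wr instead; both SQEs are
 *   // then handed to the iodev as a single operation with a single completion.
 *
 *   rtio_submit(&my_rtio, 2);
 */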
115 
116 
117 /**
118  * @brief Equivalent to the I2C_MSG_STOP flag
119  */
120 #define RTIO_IODEV_I2C_STOP BIT(0)
121 
122 /**
123  * @brief Equivalent to the I2C_MSG_RESTART flag
124  */
125 #define RTIO_IODEV_I2C_RESTART BIT(1)
126 
127 /**
128  * @brief Equivalent to the I2C_MSG_ADDR_10_BITS flag
129  */
130 #define RTIO_IODEV_I2C_10_BITS BIT(2)
131 
136 /**
137  * @brief The buffer should be allocated by the RTIO mempool
138  *
139  * This flag can only exist if the CONFIG_RTIO_SYS_MEM_BLOCKS Kconfig was
140  * enabled and the RTIO context was created via the RTIO_DEFINE_WITH_MEMPOOL()
141  * macro. If set, the buffer associated with the entry was allocated by the
142  * internal memory pool and should be released as soon as it is no longer
143  * needed via a call to rtio_release_mempool().
144  */
145 #define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
146 
147 /**
148  * @brief The SQE should not execute if possible
149  *
150  * If possible (not yet executed), the SQE should be canceled by flagging it as failed and returning
151  * -ECANCELED as the result.
152  */
153 #define RTIO_SQE_CANCELED BIT(3)
154 
155 /**
156  * @brief The SQE should continue producing CQEs until canceled
157  *
158  * This flag must be used along with RTIO_SQE_MEMPOOL_BUFFER and signals that, when a read
159  * completes, the SQE should be placed back in the queue until it is canceled.
160  */
161 #define RTIO_SQE_MULTISHOT BIT(4)
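
/*
 * Illustrative sketch (hypothetical my_rtio/my_iodev names; the context is assumed
 * to have been created with RTIO_DEFINE_WITH_MEMPOOL()): a multi-shot read keeps
 * producing CQEs, each with a mempool-allocated buffer, until the SQE is canceled
 * via the handle obtained when copying it in.
 *
 *   struct rtio_sqe sqe;
 *   struct rtio_sqe *handle;
 *
 *   rtio_sqe_prep_read_multishot(&sqe, &my_iodev, RTIO_PRIO_NORM, userdata);
 *   rtio_sqe_copy_in_get_handles(&my_rtio, &sqe, &handle, 1);
 *   rtio_submit(&my_rtio, 0);
 *
 *   // ... consume CQEs as they arrive, then stop the stream:
 *   rtio_sqe_cancel(handle);
 */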
162 
163 /**
164  * @}
165  */
166 
167 /**
168  * @brief RTIO CQE Flags
169  * @defgroup rtio_cqe_flags RTIO CQE Flags
170  * @ingroup rtio_api
171  * @{
172  */
173 
174 /**
175  * @brief The entry's buffer was allocated from the RTIO's mempool
176  *
177  * If this bit is set, the buffer was allocated from the memory pool and should be recycled as
178  * soon as the application is done with it.
179  */
180 #define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
181 
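/**
 * @brief Get the flag type of a CQE flags value
 *
 * @param flags The CQE flags value
 * @return The flag type portion (bits 0 to 7) of the flags field, e.g. RTIO_CQE_FLAG_MEMPOOL_BUFFER
 */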
182 #define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
183 
184 /**
185  * @brief Get the block index from the mempool CQE flags
186  *
187  * @param flags The CQE flags value
188  * @return The block index portion of the flags field.
189  */
190 #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
191 
192 /**
193  * @brief Get the block count from the mempool CQE flags
194  *
195  * @param flags The CQE flags value
196  * @return The block count portion of the flags field.
197  */
198 #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
199 
200 /**
201  * @brief Prepare CQE flags for a mempool read.
202  *
203  * @param blk_idx The mempool block index
204  * @param blk_cnt The mempool block count
205  * @return A shifted and masked value that can be added to the flags field with an OR operator.
206  */
207 #define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)                                               \
208 	(FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) |                                 \
209 	 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
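
/*
 * Illustrative sketch: packing and unpacking the mempool block information
 * carried in a CQE flags value (the block index 3 and count 2 are arbitrary
 * example numbers).
 *
 *   uint32_t flags = RTIO_CQE_FLAG_PREP_MEMPOOL(3, 2);
 *
 *   __ASSERT_NO_MSG(RTIO_CQE_FLAG_GET(flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER);
 *   __ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) == 3);
 *   __ASSERT_NO_MSG(RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) == 2);
 */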
210 
211 /**
212  * @}
213  */
214 
215 /** @cond ignore */
216 struct rtio;
217 struct rtio_cqe;
218 struct rtio_sqe;
219 struct rtio_sqe_pool;
220 struct rtio_cqe_pool;
221 struct rtio_block_pool;
222 struct rtio_iodev;
223 struct rtio_iodev_sqe;
224 /** @endcond */
225 
226 /**
227  * @typedef rtio_callback_t
228  * @brief Callback signature for RTIO_OP_CALLBACK
229  * @param r RTIO context being used with the callback
230  * @param sqe Submission for the callback op
231  * @param arg0 Argument option as part of the sqe
232  */
233 typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
234 
235 /**
236  * @brief A submission queue event
237  */
238 struct rtio_sqe {
239 	uint8_t op; /**< Op code */
240 
241 	uint8_t prio; /**< Op priority */
242 
243 	uint16_t flags; /**< Op Flags */
244 
245 	uint16_t iodev_flags; /**< Op iodev flags */
246 
247 	uint16_t _resv0;
248 
249 	const struct rtio_iodev *iodev; /**< Device to operate on */
250 
251 	/**
252 	 * User provided data which is returned upon operation completion. Could be a pointer or
253 	 * integer.
254 	 *
255 	 * If unique identification of completions is desired this should be
256 	 * unique as well.
257 	 */
258 	void *userdata;
259 
260 	union {
261 
262 		/** OP_TX, OP_RX */
263 		struct {
264 			uint32_t buf_len; /**< Length of buffer */
265 			uint8_t *buf; /**< Buffer to use*/
266 		};
267 
268 		/** OP_TINY_TX */
269 		struct {
270 			uint8_t tiny_buf_len; /**< Length of tiny buffer */
271 			uint8_t tiny_buf[7]; /**< Tiny buffer */
272 		};
273 
274 		/** OP_CALLBACK */
275 		struct {
276 			rtio_callback_t callback;
277 			void *arg0; /**< Last argument given to callback */
278 		};
279 
280 		/** OP_TXRX */
281 		struct {
282 			uint32_t txrx_buf_len;
283 			uint8_t *tx_buf;
284 			uint8_t *rx_buf;
285 		};
286 
287 	};
288 };
289 
290 /** @cond ignore */
291 /* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
292 BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
293 /** @endcond */
294 
295 /**
296  * @brief A completion queue event
297  */
298 struct rtio_cqe {
299 	struct rtio_mpsc_node q;
300 
301 	int32_t result; /**< Result from operation */
302 	void *userdata; /**< Associated userdata with operation */
303 	uint32_t flags; /**< Flags associated with the operation */
304 };
305 
306 struct rtio_sqe_pool {
307 	struct rtio_mpsc free_q;
308 	const uint16_t pool_size;
309 	uint16_t pool_free;
310 	struct rtio_iodev_sqe *pool;
311 };
312 
313 struct rtio_cqe_pool {
314 	struct rtio_mpsc free_q;
315 	const uint16_t pool_size;
316 	uint16_t pool_free;
317 	struct rtio_cqe *pool;
318 };
319 
320 struct rtio_block_pool {
321 	/* Memory pool associated with this RTIO context. */
322 	struct sys_mem_blocks *mempool;
323 	/* The size (in bytes) of a single block in the mempool */
324 	const uint32_t blk_size;
325 };
326 
327 /**
328  * @brief An RTIO context containing what can be viewed as a pair of queues.
329  *
330  * A queue for submissions (available and in queue to be produced) as well as a queue
331  * of completions (available and ready to be consumed).
332  *
333  * The rtio executor along with any objects implementing the rtio_iodev interface are
334  * the consumers of submissions and producers of completions.
335  *
336  * No work is started until rtio_submit is called.
337  */
338 struct rtio {
339 #ifdef CONFIG_RTIO_SUBMIT_SEM
340 	/* A wait semaphore which may suspend the calling thread
341 	 * to wait for some number of completions when calling submit
342 	 */
343 	struct k_sem *submit_sem;
344 
345 	uint32_t submit_count;
346 #endif
347 
348 #ifdef CONFIG_RTIO_CONSUME_SEM
349 	/* A wait semaphore which may suspend the calling thread
350 	 * to wait for some number of completions while consuming
351 	 * them from the completion queue
352 	 */
353 	struct k_sem *consume_sem;
354 #endif
355 
356 	/* Number of completions that could not be submitted with results
357 	 * because the completion queue event pool was exhausted
358 	 */
359 	atomic_t xcqcnt;
360 
361 	/* Submission queue object pool with free list */
362 	struct rtio_sqe_pool *sqe_pool;
363 
364 	/* Complete queue object pool with free list */
365 	struct rtio_cqe_pool *cqe_pool;
366 
367 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
368 	/* Mem block pool */
369 	struct rtio_block_pool *block_pool;
370 #endif
371 
372 	/* Submission queue */
373 	struct rtio_mpsc sq;
374 
375 	/* Completion queue */
376 	struct rtio_mpsc cq;
377 };
378 
379 /** The memory partition associated with all RTIO context information */
380 extern struct k_mem_partition rtio_partition;
381 
382 /**
383  * @brief Compute the mempool block index for a given pointer
384  *
385  * @param[in] r RTIO context
386  * @param[in] ptr Memory pointer in the mempool
387  * @return Index of the mempool block associated with the pointer. Or UINT16_MAX if invalid.
388  */
389 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
390 static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
391 {
392 	uintptr_t addr = (uintptr_t)ptr;
393 	struct sys_mem_blocks *mem_pool = r->block_pool->mempool;
394 	uint32_t block_size = r->block_pool->blk_size;
395 
396 	uintptr_t buff = (uintptr_t)mem_pool->buffer;
397 	uint32_t buff_size = mem_pool->num_blocks * block_size;
398 
399 	if (addr < buff || addr >= buff + buff_size) {
400 		return UINT16_MAX;
401 	}
402 	return (addr - buff) / block_size;
403 }
404 #endif
405 
406 /**
407  * @brief IO device submission queue entry
408  *
409  * May be cast safely to and from a rtio_sqe as they occupy the same memory provided by the pool
410  */
411 struct rtio_iodev_sqe {
412 	struct rtio_sqe sqe;
413 	struct rtio_mpsc_node q;
414 	struct rtio_iodev_sqe *next;
415 	struct rtio *r;
416 };
417 
418 /**
419  * @brief API that an RTIO IO device should implement
420  */
421 struct rtio_iodev_api {
422 	/**
423 	 * @brief Submit to the iodev an entry to work on
424 	 *
425 	 * This call should be short in duration and most likely
426 	 * either enqueue or kick off an entry with the hardware.
427 	 *
428 	 * If polling is required the iodev should add itself to the execution
429 	 * context (@see rtio_add_pollable())
430 	 *
431 	 * @param iodev_sqe Submission queue entry
432 	 */
433 	void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
434 };
435 
436 /**
437  * @brief An IO device with a function table for submitting requests
438  */
439 struct rtio_iodev {
440 	/* Function pointer table */
441 	const struct rtio_iodev_api *api;
442 
443 	/* Queue of RTIO contexts with requests */
444 	struct rtio_mpsc iodev_sq;
445 
446 	/* Data associated with this iodev */
447 	void *data;
448 };
449 
450 /** An operation that does nothing and will complete immediately */
451 #define RTIO_OP_NOP 0
452 
453 /** An operation that receives (reads) */
454 #define RTIO_OP_RX (RTIO_OP_NOP+1)
455 
456 /** An operation that transmits (writes) */
457 #define RTIO_OP_TX (RTIO_OP_RX+1)
458 
459 /** An operation that transmits tiny writes by copying the data to write */
460 #define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
461 
462 /** An operation that calls a given function (callback) */
463 #define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
464 
465 /** An operation that transceives (reads and writes simultaneously) */
466 #define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
467 
468 
469 /**
470  * @brief Prepare a nop (no op) submission
471  */
472 static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
473 				const struct rtio_iodev *iodev,
474 				void *userdata)
475 {
476 	memset(sqe, 0, sizeof(struct rtio_sqe));
477 	sqe->op = RTIO_OP_NOP;
478 	sqe->iodev = iodev;
479 	sqe->userdata = userdata;
480 }
481 
482 /**
483  * @brief Prepare a read op submission
484  */
485 static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
486 				      const struct rtio_iodev *iodev,
487 				      int8_t prio,
488 				      uint8_t *buf,
489 				      uint32_t len,
490 				      void *userdata)
491 {
492 	memset(sqe, 0, sizeof(struct rtio_sqe));
493 	sqe->op = RTIO_OP_RX;
494 	sqe->prio = prio;
495 	sqe->iodev = iodev;
496 	sqe->buf_len = len;
497 	sqe->buf = buf;
498 	sqe->userdata = userdata;
499 }
500 
501 /**
502  * @brief Prepare a read op submission with context's mempool
503  *
504  * @see rtio_sqe_prep_read()
505  */
506 static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
507 						const struct rtio_iodev *iodev, int8_t prio,
508 						void *userdata)
509 {
510 	rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
511 	sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
512 }
513 
514 static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
515 						const struct rtio_iodev *iodev, int8_t prio,
516 						void *userdata)
517 {
518 	rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
519 	sqe->flags |= RTIO_SQE_MULTISHOT;
520 }
521 
522 /**
523  * @brief Prepare a write op submission
524  */
525 static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
526 				       const struct rtio_iodev *iodev,
527 				       int8_t prio,
528 				       uint8_t *buf,
529 				       uint32_t len,
530 				       void *userdata)
531 {
532 	memset(sqe, 0, sizeof(struct rtio_sqe));
533 	sqe->op = RTIO_OP_TX;
534 	sqe->prio = prio;
535 	sqe->iodev = iodev;
536 	sqe->buf_len = len;
537 	sqe->buf = buf;
538 	sqe->userdata = userdata;
539 }
540 
541 /**
542  * @brief Prepare a tiny write op submission
543  *
544  * Unlike the normal write operation, where the source buffer must outlive the call,
545  * the tiny write data is copied into the sqe. It must be small enough to fit
546  * within the fixed size of a rtio_sqe.
547  *
548  * This is useful in many register-style protocols where a register address must be
549  * written before a subsequent read can be done.
550  */
551 static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
552 					    const struct rtio_iodev *iodev,
553 					    int8_t prio,
554 					    const uint8_t *tiny_write_data,
555 					    uint8_t tiny_write_len,
556 					    void *userdata)
557 {
558 	__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));
559 
560 	memset(sqe, 0, sizeof(struct rtio_sqe));
561 	sqe->op = RTIO_OP_TINY_TX;
562 	sqe->prio = prio;
563 	sqe->iodev = iodev;
564 	sqe->tiny_buf_len = tiny_write_len;
565 	memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
566 	sqe->userdata = userdata;
567 }
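
/*
 * Illustrative sketch (hypothetical my_rtio/my_i2c_iodev names and register value):
 * the common "write a register address, then read its value" pattern, with the
 * one-byte address copied into the SQE itself so no separate buffer has to
 * outlive the call.
 *
 *   uint8_t reg = 0x0F;
 *   uint8_t val;
 *   struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 *   struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_tiny_write(wr, &my_i2c_iodev, RTIO_PRIO_NORM, &reg, 1, NULL);
 *   wr->flags |= RTIO_SQE_TRANSACTION;  // write + read handled as one operation
 *   rtio_sqe_prep_read(rd, &my_i2c_iodev, RTIO_PRIO_NORM, &val, 1, userdata);
 *   rtio_submit(&my_rtio, 1);
 */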
568 
569 /**
570  * @brief Prepare a callback op submission
571  *
572  * A somewhat special operation in that it may only be done in kernel mode.
573  *
574  * Used where general purpose logic is required within a queue of I/O operations,
575  * for example to perform transforms between submissions.
576  */
577 static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
578 					  rtio_callback_t callback,
579 					  void *arg0,
580 					  void *userdata)
581 {
582 	memset(sqe, 0, sizeof(struct rtio_sqe));
583 	sqe->op = RTIO_OP_CALLBACK;
584 	sqe->prio = 0;
585 	sqe->iodev = NULL;
586 	sqe->callback = callback;
587 	sqe->arg0 = arg0;
588 	sqe->userdata = userdata;
589 }
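
/*
 * Illustrative sketch (hypothetical names): inserting general purpose logic into a
 * chain, e.g. to post-process data read by the previous SQE before the next one runs.
 *
 *   static void fixup_cb(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
 *   {
 *           uint8_t *data = arg0;
 *
 *           data[0] &= 0x7F;
 *   }
 *
 *   struct rtio_sqe *cb = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_callback(cb, fixup_cb, rx_buf, NULL);
 */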
590 
591 /**
592  * @brief Prepare a transceive op submission
593  */
594 static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
595 					    const struct rtio_iodev *iodev,
596 					    int8_t prio,
597 					    uint8_t *tx_buf,
598 					    uint8_t *rx_buf,
599 					    uint32_t buf_len,
600 					    void *userdata)
601 {
602 	memset(sqe, 0, sizeof(struct rtio_sqe));
603 	sqe->op = RTIO_OP_TXRX;
604 	sqe->prio = prio;
605 	sqe->iodev = iodev;
606 	sqe->txrx_buf_len = buf_len;
607 	sqe->tx_buf = tx_buf;
608 	sqe->rx_buf = rx_buf;
609 	sqe->userdata = userdata;
610 }
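
/*
 * Illustrative sketch (hypothetical my_rtio/my_spi_iodev names and data): a
 * full-duplex transfer, e.g. over SPI, where equally sized transmit and receive
 * buffers are used at once.
 *
 *   uint8_t tx[4] = { 0x80, 0x00, 0x00, 0x00 };
 *   uint8_t rx[4];
 *   struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_transceive(sqe, &my_spi_iodev, RTIO_PRIO_NORM, tx, rx, sizeof(tx), NULL);
 *   rtio_submit(&my_rtio, 1);
 */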
611 
612 static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
613 {
614 	struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
615 
616 	if (node == NULL) {
617 		return NULL;
618 	}
619 
620 	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
621 
622 	pool->pool_free--;
623 
624 	return iodev_sqe;
625 }
626 
627 static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
628 {
629 	rtio_mpsc_push(&pool->free_q, &iodev_sqe->q);
630 
631 	pool->pool_free++;
632 }
633 
634 static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
635 {
636 	struct rtio_mpsc_node *node = rtio_mpsc_pop(&pool->free_q);
637 
638 	if (node == NULL) {
639 		return NULL;
640 	}
641 
642 	struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
643 
644 	memset(cqe, 0, sizeof(struct rtio_cqe));
645 
646 	pool->pool_free--;
647 
648 	return cqe;
649 }
650 
651 static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
652 {
653 	rtio_mpsc_push(&pool->free_q, &cqe->q);
654 
655 	pool->pool_free++;
656 }
657 
658 static inline int rtio_block_pool_alloc(struct rtio_block_pool *pool, size_t min_sz,
659 					  size_t max_sz, uint8_t **buf, uint32_t *buf_len)
660 {
661 	uint32_t bytes = max_sz;
662 
663 	do {
664 		size_t num_blks = DIV_ROUND_UP(bytes, pool->blk_size);
665 		int rc = sys_mem_blocks_alloc_contiguous(pool->mempool, num_blks, (void **)buf);
666 
667 		if (rc == 0) {
668 			*buf_len = num_blks * pool->blk_size;
669 			return 0;
670 		}
671 
672 		bytes -= pool->blk_size;
673 	} while (bytes >= min_sz);
674 
675 	return -ENOMEM;
676 }
677 
678 static inline void rtio_block_pool_free(struct rtio_block_pool *pool, void *buf, uint32_t buf_len)
679 {
680 	size_t num_blks = buf_len / pool->blk_size;
681 
682 	sys_mem_blocks_free_contiguous(pool->mempool, buf, num_blks);
683 }
684 
685 /* Do not try and reformat the macros */
686 /* clang-format off */
687 
688 /**
689  * @brief Statically define and initialize an RTIO IODev
690  *
691  * @param name Name of the iodev
692  * @param iodev_api Pointer to struct rtio_iodev_api
693  * @param iodev_data Data pointer
694  */
695 #define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data)		\
696 	STRUCT_SECTION_ITERABLE(rtio_iodev, name) = {		\
697 		.api = (iodev_api),				\
698 		.iodev_sq = RTIO_MPSC_INIT((name.iodev_sq)),	\
699 		.data = (iodev_data),				\
700 	}
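
/*
 * Illustrative sketch (hypothetical names, not a real driver): a minimal iodev
 * that completes every submission immediately. A real implementation would
 * typically enqueue the SQE or start hardware and complete it later, e.g. from an
 * ISR, using rtio_iodev_sqe_ok() or rtio_iodev_sqe_err().
 *
 *   static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
 *   {
 *           rtio_iodev_sqe_ok(iodev_sqe, 0);
 *   }
 *
 *   static const struct rtio_iodev_api my_iodev_api = {
 *           .submit = my_iodev_submit,
 *   };
 *
 *   RTIO_IODEV_DEFINE(my_iodev, &my_iodev_api, NULL);
 */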
701 
702 #define Z_RTIO_SQE_POOL_DEFINE(name, sz)			\
703 	static struct rtio_iodev_sqe _sqe_pool_##name[sz];	\
704 	STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = {	\
705 		.free_q = RTIO_MPSC_INIT((name.free_q)),	\
706 		.pool_size = sz,				\
707 		.pool_free = sz,				\
708 		.pool = _sqe_pool_##name,			\
709 	}
710 
711 
712 #define Z_RTIO_CQE_POOL_DEFINE(name, sz)			\
713 	static struct rtio_cqe _cqe_pool_##name[sz];		\
714 	STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = {	\
715 		.free_q = RTIO_MPSC_INIT((name.free_q)),	\
716 		.pool_size = sz,				\
717 		.pool_free = sz,				\
718 		.pool = _cqe_pool_##name,			\
719 	}
720 
721 /**
722  * @brief Allocate to bss if available
723  *
724  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition bss. Maps to:
725  *   K_APP_BMEM(rtio_partition) static
726  *
727  * If CONFIG_USERSPACE is disabled, allocate as plain static:
728  *   static
729  */
730 #define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
731 
732 /**
733  * @brief Allocate as initialized memory if available
734  *
735  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition init. Maps to:
736  *   K_APP_DMEM(rtio_partition) static
737  *
738  * If CONFIG_USERSPACE is disabled, allocate as plain static:
739  *   static
740  */
741 #define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
742 
743 #define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align)				\
744 	RTIO_BMEM uint8_t __aligned(WB_UP(blk_align))						\
745 	_block_pool_##name[blk_cnt*WB_UP(blk_sz)];						\
746 	_SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(_sys_blocks_##name, WB_UP(blk_sz),			\
747 					    blk_cnt, _block_pool_##name,			\
748 					    RTIO_DMEM);						\
749 	static struct rtio_block_pool name = {							\
750 		.mempool = &_sys_blocks_##name,							\
751 		.blk_size = blk_sz,								\
752 	}
753 
754 #define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool)                                     \
755 	IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM,                                                         \
756 		   (static K_SEM_DEFINE(_submit_sem_##name, 0, K_SEM_MAX_LIMIT)))                  \
757 	IF_ENABLED(CONFIG_RTIO_CONSUME_SEM,                                                        \
758 		   (static K_SEM_DEFINE(_consume_sem_##name, 0, K_SEM_MAX_LIMIT)))                 \
759 	STRUCT_SECTION_ITERABLE(rtio, name) = {                                                    \
760 		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &_submit_sem_##name,))           \
761 		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,))                           \
762 		IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &_consume_sem_##name,))        \
763 		.xcqcnt = ATOMIC_INIT(0),                                                          \
764 		.sqe_pool = _sqe_pool,                                                             \
765 		.cqe_pool = _cqe_pool,                                                             \
766 		IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,))               \
767 		.sq = RTIO_MPSC_INIT((name.sq)),                                                   \
768 		.cq = RTIO_MPSC_INIT((name.cq)),                                                   \
769 	}
770 
771 /**
772  * @brief Statically define and initialize an RTIO context
773  *
774  * @param name Name of the RTIO
775  * @param sq_sz Size of the submission queue entry pool
776  * @param cq_sz Size of the completion queue entry pool
777  */
778 #define RTIO_DEFINE(name, sq_sz, cq_sz)					\
779 	Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz);			\
780 	Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz);			\
781 	Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, NULL)
782 
784 
785 /**
786  * @brief Statically define and initialize an RTIO context
787  *
788  * @param name Name of the RTIO
789  * @param sq_sz Size of the submission queue entry pool
790  * @param cq_sz Size of the completion queue entry pool
791  * @param num_blks Number of blocks in the memory pool
792  * @param blk_size The number of bytes in each block
793  * @param balign The block alignment
794  */
795 #define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
796 	Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz);		\
797 	Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz);			\
798 	Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
799 	Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
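
/*
 * Illustrative sketch (hypothetical names and pool sizes): a context with a memory
 * pool, a pool-backed read, and retrieval/release of the buffer attached to the
 * resulting CQE. Requires CONFIG_RTIO_SYS_MEM_BLOCKS.
 *
 *   RTIO_DEFINE_WITH_MEMPOOL(my_rtio, 4, 4, 16, 32, 4);
 *
 *   struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_read_with_pool(sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
 *   rtio_submit(&my_rtio, 1);
 *
 *   struct rtio_cqe *cqe = rtio_cqe_consume(&my_rtio);
 *   uint8_t *buf;
 *   uint32_t buf_len;
 *
 *   if (rtio_cqe_get_mempool_buffer(&my_rtio, cqe, &buf, &buf_len) == 0) {
 *           // use buf[0..buf_len), then hand it back to the pool
 *           rtio_release_buffer(&my_rtio, buf, buf_len);
 *   }
 *   rtio_cqe_release(&my_rtio, cqe);
 */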
800 
801 /* clang-format on */
802 
803 /**
804  * @brief Count of acquirable submission queue events
805  *
806  * @param r RTIO context
807  *
808  * @return Count of acquirable submission queue events
809  */
810 static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
811 {
812 	return r->sqe_pool->pool_free;
813 }
814 
815 /**
816  * @brief Count of likely, but not guaranteed, consumable completion queue events
817  *
818  * @param r RTIO context
819  *
820  * @return Likely count of consumable completion queue events
821  */
822 static inline uint32_t rtio_cqe_consumable(struct rtio *r)
823 {
824 	return (r->cqe_pool->pool_size - r->cqe_pool->pool_free);
825 }
826 
827 /**
828  * @brief Get the next sqe in the transaction
829  *
830  * @param iodev_sqe Submission queue entry
831  *
832  * @retval NULL if current sqe is last in transaction
833  * @retval struct rtio_iodev_sqe * if available
834  */
835 static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
836 {
837 	if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
838 		return iodev_sqe->next;
839 	} else {
840 		return NULL;
841 	}
842 }
843 
844 
845 /**
846  * @brief Get the next sqe in the chain
847  *
848  * @param iodev_sqe Submission queue entry
849  *
850  * @retval NULL if current sqe is last in chain
851  * @retval struct rtio_iodev_sqe * if available
852  */
853 static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
854 {
855 	if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
856 		return iodev_sqe->next;
857 	} else {
858 		return NULL;
859 	}
860 }
861 
862 /**
863  * @brief Get the next sqe in the chain or transaction
864  *
865  * @param iodev_sqe Submission queue entry
866  *
867  * @retval NULL if current sqe is last in chain
868  * @retval struct rtio_iodev_sqe * if available
869  */
870 static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
871 {
872 	return iodev_sqe->next;
873 }
874 
875 /**
876  * @brief Acquire a single submission queue event if available
877  *
878  * @param r RTIO context
879  *
880  * @retval sqe A valid submission queue event acquired from the submission queue
881  * @retval NULL No submission queue event available
882  */
883 static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
884 {
885 	struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
886 
887 	if (iodev_sqe == NULL) {
888 		return NULL;
889 	}
890 
891 	rtio_mpsc_push(&r->sq, &iodev_sqe->q);
892 
893 	return &iodev_sqe->sqe;
894 }
895 
896 /**
897  * @brief Drop all previously acquired SQEs
898  *
899  * @param r RTIO context
900  */
901 static inline void rtio_sqe_drop_all(struct rtio *r)
902 {
903 	struct rtio_iodev_sqe *iodev_sqe;
904 	struct rtio_mpsc_node *node = rtio_mpsc_pop(&r->sq);
905 
906 	while (node != NULL) {
907 		iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
908 		rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
909 		node = rtio_mpsc_pop(&r->sq);
910 	}
911 }
912 
913 /**
914  * @brief Acquire a completion queue event if available
915  */
916 static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
917 {
918 	struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
919 
920 	if (cqe == NULL) {
921 		return NULL;
922 	}
923 
924 	memset(cqe, 0, sizeof(struct rtio_cqe));
925 
926 	return cqe;
927 }
928 
929 /**
930  * @brief Produce a completion queue event
931  */
932 static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
933 {
934 	rtio_mpsc_push(&r->cq, &cqe->q);
935 }
936 
937 /**
938  * @brief Consume a single completion queue event if available
939  *
940  * If a completion queue event is returned rtio_cqe_release() must be called
941  * at some point to release the cqe spot for the cqe producer.
942  *
943  * @param r RTIO context
944  *
945  * @retval cqe A valid completion queue event consumed from the completion queue
946  * @retval NULL No completion queue event available
947  */
948 static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
949 {
950 	struct rtio_mpsc_node *node;
951 	struct rtio_cqe *cqe = NULL;
952 
953 #ifdef CONFIG_RTIO_CONSUME_SEM
954 	if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
955 		return NULL;
956 	}
957 #endif
958 
959 	node = rtio_mpsc_pop(&r->cq);
960 	if (node == NULL) {
961 		return NULL;
962 	}
963 	cqe = CONTAINER_OF(node, struct rtio_cqe, q);
964 
965 	return cqe;
966 }
967 
968 /**
969  * @brief Wait for and consume a single completion queue event
970  *
971  * If a completion queue event is returned rtio_cqe_release() must be called
972  * at some point to release the cqe spot for the cqe producer.
973  *
974  * @param r RTIO context
975  *
976  * @retval cqe A valid completion queue event consumed from the completion queue
977  */
978 static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
979 {
980 	struct rtio_mpsc_node *node;
981 	struct rtio_cqe *cqe;
982 
983 #ifdef CONFIG_RTIO_CONSUME_SEM
984 	k_sem_take(r->consume_sem, K_FOREVER);
985 #endif
986 	node = rtio_mpsc_pop(&r->cq);
987 	while (node == NULL) {
988 		node = rtio_mpsc_pop(&r->cq);
989 		Z_SPIN_DELAY(1);
990 	}
991 	cqe = CONTAINER_OF(node, struct rtio_cqe, q);
992 
993 	return cqe;
994 }
995 
996 /**
997  * @brief Release consumed completion queue event
998  *
999  * @param r RTIO context
1000  * @param cqe Completion queue entry
1001  */
1002 static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1003 {
1004 	rtio_cqe_pool_free(r->cqe_pool, cqe);
1005 }
1006 
1007 /**
1008  * @brief Compute the CQE flags from the rtio_iodev_sqe entry
1009  *
1010  * @param iodev_sqe The SQE entry in question.
1011  * @return The value that should be set for the CQE's flags field.
1012  */
1013 static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1014 {
1015 	uint32_t flags = 0;
1016 
1017 	ARG_UNUSED(iodev_sqe);
1018 
1019 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1020 	if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1021 		struct rtio *r = iodev_sqe->r;
1022 		struct sys_mem_blocks *mem_pool = r->block_pool->mempool;
1023 		uint32_t block_size = r->block_pool->blk_size;
1024 		int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) / block_size;
1025 		int blk_count = iodev_sqe->sqe.buf_len / block_size;
1026 
1027 		flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1028 	}
1029 #endif
1030 
1031 	return flags;
1032 }
1033 
1034 /**
1035  * @brief Retrieve the mempool buffer that was allocated for the CQE.
1036  *
1037  * If the RTIO context contains a memory pool, and the SQE was created by calling
1038  * rtio_sqe_prep_read_with_pool(), this function can be used to retrieve the memory associated with the
1039  * read. Once processing is done, it should be released by calling rtio_release_buffer().
1040  *
1041  * @param[in] r RTIO context
1042  * @param[in] cqe The CQE handling the event.
1043  * @param[out] buff Pointer to the mempool buffer
1044  * @param[out] buff_len Length of the allocated buffer
1045  * @return 0 on success
1046  * @return -EINVAL if the buffer wasn't allocated for this cqe
1047  * @return -ENOTSUP if memory blocks are disabled
1048  */
1049 __syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1050 					  uint8_t **buff, uint32_t *buff_len);
1051 
1052 static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1053 						     uint8_t **buff, uint32_t *buff_len)
1054 {
1055 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1056 	if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1057 		int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1058 		int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1059 
1060 		*buff = r->block_pool->mempool->buffer + blk_idx * r->block_pool->blk_size;
1061 		*buff_len = blk_count * r->block_pool->blk_size;
1062 		__ASSERT_NO_MSG(*buff >= r->block_pool->mempool->buffer);
1063 		__ASSERT_NO_MSG(*buff <
1064 				r->block_pool->mempool->buffer +
1065 				r->block_pool->blk_size * r->block_pool->mempool->num_blocks);
1066 		return 0;
1067 	}
1068 	return -EINVAL;
1069 #else
1070 	ARG_UNUSED(r);
1071 	ARG_UNUSED(cqe);
1072 	ARG_UNUSED(buff);
1073 	ARG_UNUSED(buff_len);
1074 
1075 	return -ENOTSUP;
1076 #endif
1077 }
1078 
1079 void rtio_executor_submit(struct rtio *r);
1080 void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1081 void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1082 
1083 /**
1084  * @brief Inform the executor of a submission completion with success
1085  *
1086  * This may start the next asynchronous request if one is available.
1087  *
1088  * @param iodev_sqe IODev Submission that has succeeded
1089  * @param result Result of the request
1090  */
1091 static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1092 {
1093 	rtio_executor_ok(iodev_sqe, result);
1094 }
1095 
1096 /**
1097  * @brief Inform the executor of a submission's completion with error
1098  *
1099  * This SHALL fail the remaining submissions in the chain.
1100  *
1101  * @param iodev_sqe Submission that has failed
1102  * @param result Result of the request
1103  */
1104 static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1105 {
1106 	rtio_executor_err(iodev_sqe, result);
1107 }
1108 
1109 /**
1110  * @brief Cancel all requests that are pending for the iodev
1111  *
1112  * @param iodev IODev to cancel all requests for
1113  */
1114 static inline void rtio_iodev_cancel_all(struct rtio_iodev *iodev)
1115 {
1116 	/* Clear pending requests as -ENODATA */
1117 	struct rtio_mpsc_node *node = rtio_mpsc_pop(&iodev->iodev_sq);
1118 
1119 	while (node != NULL) {
1120 		struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1121 
1122 		rtio_iodev_sqe_err(iodev_sqe, -ECANCELED);
1123 		node = rtio_mpsc_pop(&iodev->iodev_sq);
1124 	}
1125 }
1126 
1127 /**
1128  * Submit a completion queue event with a given result and userdata
1129  *
1130  * Called by the executor to produce a completion queue event; no inherent
1131  * locking is performed and this is not safe to do from multiple callers.
1132  *
1133  * @param r RTIO context
1134  * @param result Integer result code (could be -errno)
1135  * @param userdata Userdata to pass along to completion
1136  * @param flags Flags to use for the CQE, see RTIO_CQE_FLAG_*
1137  */
1138 static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1139 {
1140 	struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1141 
1142 	if (cqe == NULL) {
1143 		atomic_inc(&r->xcqcnt);
1144 	} else {
1145 		cqe->result = result;
1146 		cqe->userdata = userdata;
1147 		cqe->flags = flags;
1148 		rtio_cqe_produce(r, cqe);
1149 	}
1150 #ifdef CONFIG_RTIO_SUBMIT_SEM
1151 	if (r->submit_count > 0) {
1152 		r->submit_count--;
1153 		if (r->submit_count == 0) {
1154 			k_sem_give(r->submit_sem);
1155 		}
1156 	}
1157 #endif
1158 #ifdef CONFIG_RTIO_CONSUME_SEM
1159 	k_sem_give(r->consume_sem);
1160 #endif
1161 }
1162 
1163 #define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1164 
1165 /**
1166  * @brief Get the buffer associated with the RX submission
1167  *
1168  * @param[in] iodev_sqe   The submission to probe
1169  * @param[in] min_buf_len The minimum number of bytes needed for the operation
1170  * @param[in] max_buf_len The maximum number of bytes needed for the operation
1171  * @param[out] buf        Where to store the pointer to the buffer
1172  * @param[out] buf_len    Where to store the size of the buffer
1173  *
1174  * @return 0 if @p buf and @p buf_len were successfully filled
1175  * @return -ENOMEM Not enough memory for @p min_buf_len
1176  */
1177 static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1178 				  uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1179 {
1180 	struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1181 
1182 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1183 	if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1184 		struct rtio *r = iodev_sqe->r;
1185 
1186 		if (sqe->buf != NULL) {
1187 			if (sqe->buf_len < min_buf_len) {
1188 				return -ENOMEM;
1189 			}
1190 			*buf = sqe->buf;
1191 			*buf_len = sqe->buf_len;
1192 			return 0;
1193 		}
1194 
1195 		int rc = rtio_block_pool_alloc(r->block_pool, min_buf_len, max_buf_len,
1196 					       buf, buf_len);
1197 		if (rc == 0) {
1198 			sqe->buf = *buf;
1199 			sqe->buf_len = *buf_len;
1200 			return 0;
1201 		}
1202 
1203 		return -ENOMEM;
1204 	}
1205 #endif
1206 	if (sqe->buf_len < min_buf_len) {
1207 		return -ENOMEM;
1208 	}
1209 
1210 	*buf = sqe->buf;
1211 	*buf_len = sqe->buf_len;
1212 	return 0;
1213 }
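
/*
 * Illustrative sketch (hypothetical minimum/maximum sizes): an iodev
 * implementation asking for a receive buffer for the SQE it is servicing. With a
 * mempool-backed SQE the buffer is allocated from the context's block pool;
 * otherwise the caller-provided buffer is validated against the minimum length.
 *
 *   uint8_t *buf;
 *   uint32_t buf_len;
 *   int rc = rtio_sqe_rx_buf(iodev_sqe, 16, 64, &buf, &buf_len);
 *
 *   if (rc != 0) {
 *           rtio_iodev_sqe_err(iodev_sqe, rc);
 *           return;
 *   }
 */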
1214 
1215 /**
1216  * @brief Release memory that was allocated by the RTIO's memory pool
1217  *
1218  * If the RTIO context was created by a call to RTIO_DEFINE_WITH_MEMPOOL(), then the cqe data might
1219  * contain a buffer that's owned by the RTIO context. In those cases (if the read request was
1220  * configured via rtio_sqe_prep_read_with_pool()) the buffer must be returned back to the pool.
1221  *
1222  * Call this function when processing is complete. This function will validate that the memory
1223  * actually belongs to the RTIO context and will ignore invalid arguments.
1224  *
1225  * @param r RTIO context
1226  * @param buff Pointer to the buffer to be released.
1227  * @param buff_len Number of bytes to free (will be rounded up to nearest memory block).
1228  */
1229 __syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1230 
1231 static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1232 {
1233 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1234 	if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1235 		return;
1236 	}
1237 
1238 	rtio_block_pool_free(r->block_pool, buff, buff_len);
1239 #endif
1240 }
1241 
1242 /**
1243  * Grant access to an RTIO context to a user thread
1244  */
1245 static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1246 {
1247 	k_object_access_grant(r, t);
1248 
1249 #ifdef CONFIG_RTIO_SUBMIT_SEM
1250 	k_object_access_grant(r->submit_sem, t);
1251 #endif
1252 
1253 #ifdef CONFIG_RTIO_CONSUME_SEM
1254 	k_object_access_grant(r->consume_sem, t);
1255 #endif
1256 }
1257 
1258 /**
1259  * @brief Attempt to cancel an SQE
1260  *
1261  * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
1262  * result.
1263  *
1264  * @param[in] sqe The SQE to cancel
1265  * @return 0 if the SQE was flagged for cancellation
1266  * @return <0 on error
1267  */
1268 __syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1269 
1270 static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1271 {
1272 	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1273 
1274 	do {
1275 		iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1276 		iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1277 	} while (iodev_sqe != NULL);
1278 
1279 	return 0;
1280 }
1281 
1282 /**
1283  * @brief Copy an array of SQEs into the queue and get resulting handles back
1284  *
1285  * Copies one or more SQEs into the RTIO context and optionally returns their generated SQE handles.
1286  * Handles can be used to cancel events via the :c:func:`rtio_sqe_cancel` call.
1287  *
1288  * @param[in]  r RTIO context
1289  * @param[in]  sqes Pointer to an array of SQEs
1290  * @param[out] handle Optional pointer to :c:struct:`rtio_sqe` pointer to store the handle of the
1291  *             first generated SQE. Use NULL to ignore.
1292  * @param[in]  sqe_count Count of sqes in array
1293  *
1294  * @retval 0 success
1295  * @retval -ENOMEM not enough room in the queue
1296  */
1297 __syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1298 					   struct rtio_sqe **handle, size_t sqe_count);
1299 
1300 static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1301 						      struct rtio_sqe **handle,
1302 						      size_t sqe_count)
1303 {
1304 	struct rtio_sqe *sqe;
1305 	uint32_t acquirable = rtio_sqe_acquirable(r);
1306 
1307 	if (acquirable < sqe_count) {
1308 		return -ENOMEM;
1309 	}
1310 
1311 	for (unsigned long i = 0; i < sqe_count; i++) {
1312 		sqe = rtio_sqe_acquire(r);
1313 		__ASSERT_NO_MSG(sqe != NULL);
1314 		if (handle != NULL && i == 0) {
1315 			*handle = sqe;
1316 		}
1317 		*sqe = sqes[i];
1318 	}
1319 
1320 	return 0;
1321 }
1322 
1323 /**
1324  * @brief Copy an array of SQEs into the queue
1325  *
1326  * Useful if a batch of submissions is stored in ROM or
1327  * RTIO is used from user mode where a copy must be made.
1328  *
1329  * Partial copying is not done as chained SQEs need to be submitted
1330  * as a whole set.
1331  *
1332  * @param r RTIO context
1333  * @param sqes Pointer to an array of SQEs
1334  * @param sqe_count Count of sqes in array
1335  *
1336  * @retval 0 success
1337  * @retval -ENOMEM not enough room in the queue
1338  */
1339 static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1340 {
1341 	return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1342 }
1343 
1344 /**
1345  * @brief Copy an array of CQEs from the queue
1346  *
1347  * Copies from the RTIO context and its queue completion queue
1348  * events, waiting for the given time period to gather the number
1349  * of completions requested.
1350  *
1351  * @param r RTIO context
1352  * @param cqes Pointer to an array of CQEs
1353  * @param cqe_count Count of cqes in array
1354  * @param timeout Timeout to wait for each completion event. Total wait time is
1355  *                potentially timeout*cqe_count at maximum.
1356  *
1357  * @retval copy_count Count of copied CQEs (0 to cqe_count)
1358  */
1359 __syscall int rtio_cqe_copy_out(struct rtio *r,
1360 				struct rtio_cqe *cqes,
1361 				size_t cqe_count,
1362 				k_timeout_t timeout);
1363 static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1364 					   struct rtio_cqe *cqes,
1365 					   size_t cqe_count,
1366 					   k_timeout_t timeout)
1367 {
1368 	size_t copied = 0;
1369 	struct rtio_cqe *cqe;
1370 	int64_t end = sys_clock_timeout_end_calc(timeout);
1371 
1372 	do {
1373 		cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1374 						       : rtio_cqe_consume(r);
1375 		if (cqe == NULL) {
1376 #ifdef CONFIG_BOARD_NATIVE_POSIX
1377 			/* Native posix fakes the clock and only moves it forward when sleeping. */
1378 			k_sleep(K_TICKS(1));
1379 #else
1380 			Z_SPIN_DELAY(1);
1381 #endif
1382 			continue;
1383 		}
1384 		cqes[copied++] = *cqe;
1385 		rtio_cqe_release(r, cqe);
1386 	} while (copied < cqe_count && end > k_uptime_ticks());
1387 
1388 	return copied;
1389 }
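
/*
 * Illustrative sketch (hypothetical my_rtio name, arbitrary sizes and timeout):
 * draining up to four completions, waiting at most 100 ms per completion event.
 *
 *   struct rtio_cqe cqes[4];
 *   int n = rtio_cqe_copy_out(&my_rtio, cqes, ARRAY_SIZE(cqes), K_MSEC(100));
 *
 *   for (int i = 0; i < n; i++) {
 *           // cqes[i].result and cqes[i].userdata describe each completion
 *   }
 */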
1390 
1391 /**
1392  * @brief Submit I/O requests to the underlying executor
1393  *
1394  * Submits the queue of submission queue events to the executor.
1395  * The executor will do the work of managing tasks representing each
1396  * submission chain, freeing submission queue events when done, and
1397  * producing completion queue events as submissions are completed.
1398  *
1399  * @param r RTIO context
1400  * @param wait_count Number of submissions to wait for completion of.
1401  *
1402  * @retval 0 On success
1403  */
1404 __syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1405 
1406 static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1407 {
1408 	int res = 0;
1409 
1410 #ifdef CONFIG_RTIO_SUBMIT_SEM
1411 	/* TODO undefined behavior if another thread calls submit of course
1412 	 */
1413 	if (wait_count > 0) {
1414 		__ASSERT(!k_is_in_isr(),
1415 			 "expected rtio submit with wait count to be called from a thread");
1416 
1417 		k_sem_reset(r->submit_sem);
1418 		r->submit_count = wait_count;
1419 	}
1420 #endif
1421 
1422 	/* Submit the queue to the executor which consumes submissions
1423 	 * and produces completions through ISR chains or other means.
1424 	 */
1425 	rtio_executor_submit(r);
1426 
1427 
1428 	/* TODO could be nicer if we could suspend the thread and not
1429 	 * wake up on each completion here.
1430 	 */
1431 #ifdef CONFIG_RTIO_SUBMIT_SEM
1432 
1433 	if (wait_count > 0) {
1434 		res = k_sem_take(r->submit_sem, K_FOREVER);
1435 		__ASSERT(res == 0,
1436 			 "semaphore was reset or timed out while waiting on completions!");
1437 	}
1438 #else
1439 	while (rtio_cqe_consumable(r) < wait_count) {
1440 		Z_SPIN_DELAY(10);
1441 		k_yield();
1442 	}
1443 #endif
1444 
1445 	return res;
1446 }
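
/*
 * Illustrative sketch (hypothetical my_rtio/my_iodev names and sizes) of the
 * end-to-end flow: define a context, acquire and prepare a submission, submit
 * and wait for it, then consume and release the completion.
 *
 *   RTIO_DEFINE(my_rtio, 4, 4);
 *
 *   uint8_t rx[16];
 *   struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_read(sqe, &my_iodev, RTIO_PRIO_NORM, rx, sizeof(rx), rx);
 *   rtio_submit(&my_rtio, 1);
 *
 *   struct rtio_cqe *cqe = rtio_cqe_consume_block(&my_rtio);
 *
 *   if (cqe->result < 0) {
 *           // handle the error reported in cqe->result
 *   }
 *   rtio_cqe_release(&my_rtio, cqe);
 */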
1447 
1448 /**
1449  * @}
1450  */
1451 
1452 #ifdef __cplusplus
1453 }
1454 #endif
1455 
1456 #include <syscalls/rtio.h>
1457 
1458 #endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
1459