1 /*
2  * Copyright (c) 2022 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Real-Time IO device API for moving bytes with low effort
10  *
11  * RTIO is a context for asynchronous batch operations using a submission and completion queue.
12  *
13  * Asynchronous I/O operations are set up in a submission queue. Each entry in the queue
14  * describes the operation to perform, with well understood semantics.
15  *
16  * Operations may be chained in such a way that the next operation is executed only once
17  * the current one completes. If the current operation fails, all operations chained
18  * after it fail as well.
19  *
20  * Operations may also be submitted as a transaction where a set of operations are considered
21  * to be one operation.
22  *
23  * The completion of these operations typically provides one or more completion queue events.
24  */
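/*
 * A minimal usage sketch (not a drop-in example). It assumes an iodev named
 * spi_iodev has been defined elsewhere, e.g. with RTIO_IODEV_DEFINE(), and
 * shows a write chained to a read with a blocking wait on both completions:
 *
 *   RTIO_DEFINE(ez_io, 4, 4);  // 4 submission and 4 completion entries
 *   static uint8_t tx[4], rx[4];
 *
 *   void blocking_write_then_read(void)
 *   {
 *           struct rtio_sqe *wr = rtio_sqe_acquire(&ez_io);
 *           struct rtio_sqe *rd = rtio_sqe_acquire(&ez_io);
 *
 *           rtio_sqe_prep_write(wr, &spi_iodev, RTIO_PRIO_NORM, tx, sizeof(tx), NULL);
 *           wr->flags |= RTIO_SQE_CHAINED;  // the read starts only after the write succeeds
 *           rtio_sqe_prep_read(rd, &spi_iodev, RTIO_PRIO_NORM, rx, sizeof(rx), rx);
 *
 *           rtio_submit(&ez_io, 2);  // wait for both completions
 *
 *           for (int i = 0; i < 2; i++) {
 *                   struct rtio_cqe *cqe = rtio_cqe_consume_block(&ez_io);
 *
 *                   // cqe->result holds the operation result, cqe->userdata the tag
 *                   rtio_cqe_release(&ez_io, cqe);
 *           }
 *   }
 */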
25 
26 #ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27 #define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28 
29 #include <string.h>
30 
31 #include <zephyr/app_memory/app_memdomain.h>
32 #include <zephyr/device.h>
33 #include <zephyr/kernel.h>
34 #include <zephyr/sys/__assert.h>
35 #include <zephyr/sys/atomic.h>
36 #include <zephyr/sys/mem_blocks.h>
37 #include <zephyr/sys/util.h>
38 #include <zephyr/sys/iterable_sections.h>
39 #include <zephyr/sys/mpsc_lockfree.h>
40 
41 #ifdef __cplusplus
42 extern "C" {
43 #endif
44 
45 
46 /**
47  * @brief RTIO
48  * @defgroup rtio RTIO
49  * @since 3.2
50  * @version 0.1.0
51  * @ingroup os_services
52  * @{
53  */
54 
55 /**
56  * @brief RTIO Predefined Priorities
57  * @defgroup rtio_sqe_prio RTIO Priorities
58  * @ingroup rtio
59  * @{
60  */
61 
62 /**
63  * @brief Low priority
64  */
65 #define RTIO_PRIO_LOW 0U
66 
67 /**
68  * @brief Normal priority
69  */
70 #define RTIO_PRIO_NORM 127U
71 
72 /**
73  * @brief High priority
74  */
75 #define RTIO_PRIO_HIGH 255U
76 
77 /**
78  * @}
79  */
80 
81 
82 /**
83  * @brief RTIO SQE Flags
84  * @defgroup rtio_sqe_flags RTIO SQE Flags
85  * @ingroup rtio
86  * @{
87  */
88 
89 /**
90  * @brief The next request in the queue should wait on this one.
91  *
92  * Chained SQEs are individual units of work describing patterns of
93  * ordering and failure cascading. A chained SQE must be started only
94  * after the one before it. They are given to the iodevs one after another.
95  */
96 #define RTIO_SQE_CHAINED BIT(0)
97 
98 /**
99  * @brief The next request in the queue is part of a transaction.
100  *
101  * Transactional SQEs are sequential parts of a single unit of work.
102  * Only the first transactional SQE is submitted to an iodev; the
103  * remaining SQEs are never individually submitted but are instead considered
104  * part of the transaction handed to that single iodev. The first SQE in the
105  * sequence holds the iodev that will be used and the last holds the userdata
106  * that will be returned in a single completion on failure/success.
107  */
108 #define RTIO_SQE_TRANSACTION BIT(1)
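/*
 * A short sketch of the difference between the two flags, assuming a
 * hypothetical iodev named my_iodev and SQEs already acquired from a context:
 *
 *   // Chain: two independent operations, the second handed to the iodev only
 *   // after the first completes successfully; each produces its own CQE.
 *   rtio_sqe_prep_write(first, &my_iodev, RTIO_PRIO_NORM, out, out_len, NULL);
 *   first->flags |= RTIO_SQE_CHAINED;
 *   rtio_sqe_prep_read(second, &my_iodev, RTIO_PRIO_NORM, in, in_len, NULL);
 *
 *   // Transaction: both SQEs form one operation for the iodev named in the
 *   // first SQE, completing with a single CQE carrying the last SQE's userdata.
 *   rtio_sqe_prep_write(first, &my_iodev, RTIO_PRIO_NORM, out, out_len, NULL);
 *   first->flags |= RTIO_SQE_TRANSACTION;
 *   rtio_sqe_prep_read(second, &my_iodev, RTIO_PRIO_NORM, in, in_len, userdata);
 */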
109 
110 
111 /**
112  * @brief The buffer should be allocated by the RTIO mempool
113  *
114  * This flag can only be used if the CONFIG_RTIO_SYS_MEM_BLOCKS Kconfig option is
115  * enabled and the RTIO context was created via the RTIO_DEFINE_WITH_MEMPOOL()
116  * macro. If set, the buffer associated with the entry was allocated from the
117  * internal memory pool and should be released as soon as it is no longer
118  * needed via a call to rtio_release_buffer().
119  */
120 #define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
121 
122 /**
123  * @brief The SQE should not execute if possible
124  *
125  * If possible (not yet executed), the SQE should be canceled by flagging it as failed and returning
126  * -ECANCELED as the result.
127  */
128 #define RTIO_SQE_CANCELED BIT(3)
129 
130 /**
131  * @brief The SQE should continue producing CQEs until canceled
132  *
133  * This flag must be used along with @ref RTIO_SQE_MEMPOOL_BUFFER and signals that when a read
134  * completes, the SQE should be placed back in the queue until it is canceled.
135  */
136 #define RTIO_SQE_MULTISHOT BIT(4)
137 
138 /**
139  * @brief The SQE does not produce a CQE.
140  */
141 #define RTIO_SQE_NO_RESPONSE BIT(5)
142 
143 /**
144  * @}
145  */
146 
147 /**
148  * @brief RTIO CQE Flags
149  * @defgroup rtio_cqe_flags RTIO CQE Flags
150  * @ingroup rtio
151  * @{
152  */
153 
154 /**
155  * @brief The entry's buffer was allocated from the RTIO's mempool
156  *
157  * If this bit is set, the buffer was allocated from the memory pool and should be recycled as
158  * soon as the application is done with it.
159  */
160 #define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
161 
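/**
 * @brief Get the flag bits of a CQE flags value
 *
 * @param flags The CQE flags value
 * @return The flag bits (bits 0-7) of the flags field.
 */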
162 #define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
163 
164 /**
165  * @brief Get the mempool block index from a CQE's flags
166  *
167  * @param flags The CQE flags value
168  * @return The block index portion of the flags field.
169  */
170 #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
171 
172 /**
173  * @brief Get the mempool block count from a CQE's flags
174  *
175  * @param flags The CQE flags value
176  * @return The block count portion of the flags field.
177  */
178 #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
179 
180 /**
181  * @brief Prepare CQE flags for a mempool read.
182  *
183  * @param blk_idx The mempool block index
184  * @param blk_cnt The mempool block count
185  * @return A shifted and masked value that can be added to the flags field with an OR operator.
186  */
187 #define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)                                               \
188 	(FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) |                                 \
189 	 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
190 
191 /**
192  * @}
193  */
194 
195 /**
196  * @brief Equivalent to the I2C_MSG_STOP flag
197  */
198 #define RTIO_IODEV_I2C_STOP BIT(1)
199 
200 /**
201  * @brief Equivalent to the I2C_MSG_RESTART flag
202  */
203 #define RTIO_IODEV_I2C_RESTART BIT(2)
204 
205 /**
206  * @brief Equivalent to the I2C_MSG_ADDR_10_BITS flag
207  */
208 #define RTIO_IODEV_I2C_10_BITS BIT(3)
209 
210 /** @cond ignore */
211 struct rtio;
212 struct rtio_cqe;
213 struct rtio_sqe;
214 struct rtio_sqe_pool;
215 struct rtio_cqe_pool;
216 struct rtio_iodev;
217 struct rtio_iodev_sqe;
218 /** @endcond */
219 
220 /**
221  * @typedef rtio_callback_t
222  * @brief Callback signature for RTIO_OP_CALLBACK
223  * @param r RTIO context being used with the callback
224  * @param sqe Submission for the callback op
225  * @param arg0 Argument option as part of the sqe
226  */
227 typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
228 
229 /**
230  * @brief A submission queue event
231  */
232 struct rtio_sqe {
233 	uint8_t op; /**< Op code */
234 
235 	uint8_t prio; /**< Op priority */
236 
237 	uint16_t flags; /**< Op Flags */
238 
239 	uint16_t iodev_flags; /**< Op iodev flags */
240 
241 	uint16_t _resv0;
242 
243 	const struct rtio_iodev *iodev; /**< Device to operate on */
244 
245 	/**
246 	 * User provided data which is returned upon operation completion. Could be a pointer or
247 	 * integer.
248 	 *
249 	 * If unique identification of completions is desired this should be
250 	 * unique as well.
251 	 */
252 	void *userdata;
253 
254 	union {
255 
256 		/** OP_TX, OP_RX */
257 		struct {
258 			uint32_t buf_len; /**< Length of buffer */
259 			uint8_t *buf; /**< Buffer to use */
260 		};
261 
262 		/** OP_TINY_TX */
263 		struct {
264 			uint8_t tiny_buf_len; /**< Length of tiny buffer */
265 			uint8_t tiny_buf[7]; /**< Tiny buffer */
266 		};
267 
268 		/** OP_CALLBACK */
269 		struct {
270 			rtio_callback_t callback;
271 			void *arg0; /**< Last argument given to callback */
272 		};
273 
274 		/** OP_TXRX */
275 		struct {
276 			uint32_t txrx_buf_len;
277 			uint8_t *tx_buf;
278 			uint8_t *rx_buf;
279 		};
280 
281 		/** OP_I2C_CONFIGURE */
282 		uint32_t i2c_config;
283 	};
284 };
285 
286 /** @cond ignore */
287 /* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
288 BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
289 /** @endcond */
290 
291 /**
292  * @brief A completion queue event
293  */
294 struct rtio_cqe {
295 	struct mpsc_node q;
296 
297 	int32_t result; /**< Result from operation */
298 	void *userdata; /**< Associated userdata with operation */
299 	uint32_t flags; /**< Flags associated with the operation */
300 };
301 
302 struct rtio_sqe_pool {
303 	struct mpsc free_q;
304 	const uint16_t pool_size;
305 	uint16_t pool_free;
306 	struct rtio_iodev_sqe *pool;
307 };
308 
309 struct rtio_cqe_pool {
310 	struct mpsc free_q;
311 	const uint16_t pool_size;
312 	uint16_t pool_free;
313 	struct rtio_cqe *pool;
314 };
315 
316 /**
317  * @brief An RTIO context containing what can be viewed as a pair of queues.
318  *
319  * A queue for submissions (available and in queue to be produced) as well as a queue
320  * of completions (available and ready to be consumed).
321  *
322  * The rtio executor along with any objects implementing the rtio_iodev interface are
323  * the consumers of submissions and producers of completions.
324  *
325  * No work is started until rtio_submit() is called.
326  */
327 struct rtio {
328 #ifdef CONFIG_RTIO_SUBMIT_SEM
329 	/* A wait semaphore which may suspend the calling thread
330 	 * to wait for some number of completions when calling submit
331 	 */
332 	struct k_sem *submit_sem;
333 
334 	uint32_t submit_count;
335 #endif
336 
337 #ifdef CONFIG_RTIO_CONSUME_SEM
338 	/* A wait semaphore which may suspend the calling thread
339 	 * to wait for some number of completions while consuming
340 	 * them from the completion queue
341 	 */
342 	struct k_sem *consume_sem;
343 #endif
344 
345 	/* Total number of completions */
346 	atomic_t cq_count;
347 
348 	/* Number of completions that could not be reported with a result
349 	 * because the completion queue entry pool was exhausted
350 	 */
351 	atomic_t xcqcnt;
352 
353 	/* Submission queue object pool with free list */
354 	struct rtio_sqe_pool *sqe_pool;
355 
356 	/* Completion queue object pool with free list */
357 	struct rtio_cqe_pool *cqe_pool;
358 
359 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
360 	/* Mem block pool */
361 	struct sys_mem_blocks *block_pool;
362 #endif
363 
364 	/* Submission queue */
365 	struct mpsc sq;
366 
367 	/* Completion queue */
368 	struct mpsc cq;
369 };
370 
371 /** The memory partition associated with all RTIO context information */
372 extern struct k_mem_partition rtio_partition;
373 
374 /**
375  * @brief Get the mempool block size of the RTIO context
376  *
377  * @param[in] r The RTIO context
378  * @return The size of each block in the context's mempool
379  * @return 0 if the context doesn't have a mempool
380  */
381 static inline size_t rtio_mempool_block_size(const struct rtio *r)
382 {
383 #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
384 	ARG_UNUSED(r);
385 	return 0;
386 #else
387 	if (r == NULL || r->block_pool == NULL) {
388 		return 0;
389 	}
390 	return BIT(r->block_pool->info.blk_sz_shift);
391 #endif
392 }
393 
394 /**
395  * @brief Compute the mempool block index for a given pointer
396  *
397  * @param[in] r RTIO context
398  * @param[in] ptr Memory pointer in the mempool
399  * @return Index of the mempool block associated with the pointer. Or UINT16_MAX if invalid.
400  */
401 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
402 static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
403 {
404 	uintptr_t addr = (uintptr_t)ptr;
405 	struct sys_mem_blocks *mem_pool = r->block_pool;
406 	uint32_t block_size = rtio_mempool_block_size(r);
407 
408 	uintptr_t buff = (uintptr_t)mem_pool->buffer;
409 	uint32_t buff_size = mem_pool->info.num_blocks * block_size;
410 
411 	if (addr < buff || addr >= buff + buff_size) {
412 		return UINT16_MAX;
413 	}
414 	return (addr - buff) / block_size;
415 }
416 #endif
417 
418 /**
419  * @brief IO device submission queue entry
420  *
421  * May be cast safely to and from a rtio_sqe as they occupy the same memory provided by the pool
422  */
423 struct rtio_iodev_sqe {
424 	struct rtio_sqe sqe;
425 	struct mpsc_node q;
426 	struct rtio_iodev_sqe *next;
427 	struct rtio *r;
428 };
429 
430 /**
431  * @brief API that an RTIO IO device should implement
432  */
433 struct rtio_iodev_api {
434 	/**
435 	 * @brief Submit to the iodev an entry to work on
436 	 *
437 	 * This call should be short in duration and most likely
438 	 * either enqueue or kick off an entry with the hardware.
439 	 *
440 	 * @param iodev_sqe Submission queue entry
441 	 */
442 	void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
443 };
444 
445 /**
446  * @brief An IO device with a function table for submitting requests
447  */
448 struct rtio_iodev {
449 	/* Function pointer table */
450 	const struct rtio_iodev_api *api;
451 
452 	/* Data associated with this iodev */
453 	void *data;
454 };
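/*
 * A minimal iodev sketch with hypothetical names. A real driver would usually
 * queue the iodev_sqe, start the hardware, and complete it later (e.g. from an
 * ISR) with rtio_iodev_sqe_ok() or rtio_iodev_sqe_err(); this one completes
 * every submission immediately:
 *
 *   static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
 *   {
 *           rtio_iodev_sqe_ok(iodev_sqe, 0);
 *   }
 *
 *   static const struct rtio_iodev_api my_iodev_api = {
 *           .submit = my_iodev_submit,
 *   };
 *
 *   RTIO_IODEV_DEFINE(my_iodev, &my_iodev_api, NULL);
 */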
455 
456 /** An operation that does nothing and will complete immediately */
457 #define RTIO_OP_NOP 0
458 
459 /** An operation that receives (reads) */
460 #define RTIO_OP_RX (RTIO_OP_NOP+1)
461 
462 /** An operation that transmits (writes) */
463 #define RTIO_OP_TX (RTIO_OP_RX+1)
464 
465 /** An operation that transmits tiny writes by copying the data to write */
466 #define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
467 
468 /** An operation that calls a given function (callback) */
469 #define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
470 
471 /** An operation that transceives (reads and writes simultaneously) */
472 #define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
473 
474 /** An operation to recover I2C buses */
475 #define RTIO_OP_I2C_RECOVER (RTIO_OP_TXRX+1)
476 
477 /** An operation to configure I2C buses */
478 #define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
479 
480 /**
481  * @brief Prepare a nop (no op) submission
482  */
483 static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
484 				const struct rtio_iodev *iodev,
485 				void *userdata)
486 {
487 	memset(sqe, 0, sizeof(struct rtio_sqe));
488 	sqe->op = RTIO_OP_NOP;
489 	sqe->iodev = iodev;
490 	sqe->userdata = userdata;
491 }
492 
493 /**
494  * @brief Prepare a read op submission
495  */
496 static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
497 				      const struct rtio_iodev *iodev,
498 				      int8_t prio,
499 				      uint8_t *buf,
500 				      uint32_t len,
501 				      void *userdata)
502 {
503 	memset(sqe, 0, sizeof(struct rtio_sqe));
504 	sqe->op = RTIO_OP_RX;
505 	sqe->prio = prio;
506 	sqe->iodev = iodev;
507 	sqe->buf_len = len;
508 	sqe->buf = buf;
509 	sqe->userdata = userdata;
510 }
511 
512 /**
513  * @brief Prepare a read op submission with context's mempool
514  *
515  * @see rtio_sqe_prep_read()
516  */
517 static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
518 						const struct rtio_iodev *iodev, int8_t prio,
519 						void *userdata)
520 {
521 	rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
522 	sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
523 }
524 
525 static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
526 						const struct rtio_iodev *iodev, int8_t prio,
527 						void *userdata)
528 {
529 	rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
530 	sqe->flags |= RTIO_SQE_MULTISHOT;
531 }
532 
533 /**
534  * @brief Prepare a write op submission
535  */
536 static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
537 				       const struct rtio_iodev *iodev,
538 				       int8_t prio,
539 				       uint8_t *buf,
540 				       uint32_t len,
541 				       void *userdata)
542 {
543 	memset(sqe, 0, sizeof(struct rtio_sqe));
544 	sqe->op = RTIO_OP_TX;
545 	sqe->prio = prio;
546 	sqe->iodev = iodev;
547 	sqe->buf_len = len;
548 	sqe->buf = buf;
549 	sqe->userdata = userdata;
550 }
551 
552 /**
553  * @brief Prepare a tiny write op submission
554  *
555  * Unlike the normal write operation, where the source buffer must outlive the operation,
556  * the tiny write data is copied into the sqe itself. The data must be small enough to fit
557  * within the tiny buffer of a rtio_sqe.
558  *
559  * This is useful in many scenarios with register-based devices, where a register address
560  * must be written before a subsequent read.
561  */
562 static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
563 					    const struct rtio_iodev *iodev,
564 					    int8_t prio,
565 					    const uint8_t *tiny_write_data,
566 					    uint8_t tiny_write_len,
567 					    void *userdata)
568 {
569 	__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_buf));
570 
571 	memset(sqe, 0, sizeof(struct rtio_sqe));
572 	sqe->op = RTIO_OP_TINY_TX;
573 	sqe->prio = prio;
574 	sqe->iodev = iodev;
575 	sqe->tiny_buf_len = tiny_write_len;
576 	memcpy(sqe->tiny_buf, tiny_write_data, tiny_write_len);
577 	sqe->userdata = userdata;
578 }
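/*
 * A sketch of the register-then-read pattern, assuming a hypothetical
 * i2c_iodev, a persistent read buffer reg_val, and SQEs wr/rd already acquired
 * from a context ez_io. Because the register address is copied into the SQE, a
 * stack variable may be used even though the submission is asynchronous; only
 * the read buffer has to outlive the operation:
 *
 *   uint8_t reg_addr = 0x0F;
 *
 *   rtio_sqe_prep_tiny_write(wr, &i2c_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
 *   wr->flags |= RTIO_SQE_TRANSACTION;  // write + read as one operation
 *   rtio_sqe_prep_read(rd, &i2c_iodev, RTIO_PRIO_NORM, reg_val, sizeof(reg_val), NULL);
 *   rtio_submit(&ez_io, 1);
 */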
579 
580 /**
581  * @brief Prepare a callback op submission
582  *
583  * A somewhat special operation in that it may only be done in kernel mode.
584  *
585  * Used where general purpose logic is required within a queue of I/O operations, for
586  * example to transform or inspect data between operations.
587  */
588 static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
589 					  rtio_callback_t callback,
590 					  void *arg0,
591 					  void *userdata)
592 {
593 	memset(sqe, 0, sizeof(struct rtio_sqe));
594 	sqe->op = RTIO_OP_CALLBACK;
595 	sqe->prio = 0;
596 	sqe->iodev = NULL;
597 	sqe->callback = callback;
598 	sqe->arg0 = arg0;
599 	sqe->userdata = userdata;
600 }
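/*
 * A sketch of a callback chained after a read, with hypothetical names; the
 * callback runs in the executor's context (kernel mode only) once the read
 * has completed:
 *
 *   static void on_read_done(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
 *   {
 *           // arg0 is whatever was passed at prep time, here the filled buffer
 *   }
 *
 *   rtio_sqe_prep_read(rd, &my_iodev, RTIO_PRIO_NORM, buf, sizeof(buf), NULL);
 *   rd->flags |= RTIO_SQE_CHAINED;
 *   rtio_sqe_prep_callback(cb, on_read_done, buf, NULL);
 */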
601 
602 /**
603  * @brief Prepare a transceive op submission
604  */
605 static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
606 					    const struct rtio_iodev *iodev,
607 					    int8_t prio,
608 					    uint8_t *tx_buf,
609 					    uint8_t *rx_buf,
610 					    uint32_t buf_len,
611 					    void *userdata)
612 {
613 	memset(sqe, 0, sizeof(struct rtio_sqe));
614 	sqe->op = RTIO_OP_TXRX;
615 	sqe->prio = prio;
616 	sqe->iodev = iodev;
617 	sqe->txrx_buf_len = buf_len;
618 	sqe->tx_buf = tx_buf;
619 	sqe->rx_buf = rx_buf;
620 	sqe->userdata = userdata;
621 }
622 
623 static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
624 {
625 	struct mpsc_node *node = mpsc_pop(&pool->free_q);
626 
627 	if (node == NULL) {
628 		return NULL;
629 	}
630 
631 	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
632 
633 	pool->pool_free--;
634 
635 	return iodev_sqe;
636 }
637 
638 static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
639 {
640 	mpsc_push(&pool->free_q, &iodev_sqe->q);
641 
642 	pool->pool_free++;
643 }
644 
645 static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
646 {
647 	struct mpsc_node *node = mpsc_pop(&pool->free_q);
648 
649 	if (node == NULL) {
650 		return NULL;
651 	}
652 
653 	struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
654 
655 	memset(cqe, 0, sizeof(struct rtio_cqe));
656 
657 	pool->pool_free--;
658 
659 	return cqe;
660 }
661 
662 static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
663 {
664 	mpsc_push(&pool->free_q, &cqe->q);
665 
666 	pool->pool_free++;
667 }
668 
669 static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
670 					  size_t max_sz, uint8_t **buf, uint32_t *buf_len)
671 {
672 #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
673 	ARG_UNUSED(r);
674 	ARG_UNUSED(min_sz);
675 	ARG_UNUSED(max_sz);
676 	ARG_UNUSED(buf);
677 	ARG_UNUSED(buf_len);
678 	return -ENOTSUP;
679 #else
680 	const uint32_t block_size = rtio_mempool_block_size(r);
681 	uint32_t bytes = max_sz;
682 
683 	/* Not every context has a block pool and the block size may return 0 in
684 	 * that case
685 	 */
686 	if (block_size == 0) {
687 		return -ENOMEM;
688 	}
689 
690 	do {
691 		size_t num_blks = DIV_ROUND_UP(bytes, block_size);
692 		int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
693 
694 		if (rc == 0) {
695 			*buf_len = num_blks * block_size;
696 			return 0;
697 		}
698 
699 		bytes -= block_size;
700 	} while (bytes >= min_sz);
701 
702 	return -ENOMEM;
703 #endif
704 }
705 
706 static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
707 {
708 #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
709 	ARG_UNUSED(r);
710 	ARG_UNUSED(buf);
711 	ARG_UNUSED(buf_len);
712 #else
713 	size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
714 
715 	sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
716 #endif
717 }
718 
719 /* Do not try and reformat the macros */
720 /* clang-format off */
721 
722 /**
723  * @brief Statically define and initialize an RTIO IODev
724  *
725  * @param name Name of the iodev
726  * @param iodev_api Pointer to struct rtio_iodev_api
727  * @param iodev_data Data pointer
728  */
729 #define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data)		\
730 	STRUCT_SECTION_ITERABLE(rtio_iodev, name) = {		\
731 		.api = (iodev_api),				\
732 		.data = (iodev_data),				\
733 	}
734 
735 #define Z_RTIO_SQE_POOL_DEFINE(name, sz)			\
736 	static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz];	\
737 	STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = {	\
738 		.free_q = MPSC_INIT((name.free_q)),	\
739 		.pool_size = sz,				\
740 		.pool_free = sz,				\
741 		.pool = CONCAT(_sqe_pool_, name),		\
742 	}
743 
744 
745 #define Z_RTIO_CQE_POOL_DEFINE(name, sz)			\
746 	static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz];	\
747 	STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = {	\
748 		.free_q = MPSC_INIT((name.free_q)),	\
749 		.pool_size = sz,				\
750 		.pool_free = sz,				\
751 		.pool = CONCAT(_cqe_pool_, name),		\
752 	}
753 
754 /**
755  * @brief Allocate to bss if available
756  *
757  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition bss. Maps to:
758  *   K_APP_BMEM(rtio_partition) static
759  *
760  * If CONFIG_USERSPACE is disabled, allocate as plain static:
761  *   static
762  */
763 #define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
764 
765 /**
766  * @brief Allocate as initialized memory if available
767  *
768  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition init. Maps to:
769  *   K_APP_DMEM(rtio_partition) static
770  *
771  * If CONFIG_USERSPACE is disabled, allocate as plain static:
772  *   static
773  */
774 #define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
775 
776 #define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align)                                 \
777 	RTIO_BMEM uint8_t __aligned(WB_UP(blk_align))                                              \
778 	CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)];                                         \
779 	_SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt,                          \
780 					    CONCAT(_block_pool_, name),	RTIO_DMEM)
781 
782 #define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool)                                     \
783 	IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM,                                                         \
784 		   (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT)))          \
785 	IF_ENABLED(CONFIG_RTIO_CONSUME_SEM,                                                        \
786 		   (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT)))         \
787 	STRUCT_SECTION_ITERABLE(rtio, name) = {                                                    \
788 		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),))   \
789 		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,))                           \
790 		IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
791 		.cq_count = ATOMIC_INIT(0),                                                        \
792 		.xcqcnt = ATOMIC_INIT(0),                                                          \
793 		.sqe_pool = _sqe_pool,                                                             \
794 		.cqe_pool = _cqe_pool,                                                             \
795 		IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,))               \
796 		.sq = MPSC_INIT((name.sq)),                                                        \
797 		.cq = MPSC_INIT((name.cq)),                                                        \
798 	}
799 
800 /**
801  * @brief Statically define and initialize an RTIO context
802  *
803  * @param name Name of the RTIO
804  * @param sq_sz Size of the submission queue entry pool
805  * @param cq_sz Size of the completion queue entry pool
806  */
807 #define RTIO_DEFINE(name, sq_sz, cq_sz)						\
808 	Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz);			\
809 	Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz);			\
810 	Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool),				\
811 		      &CONCAT(name, _cqe_pool), NULL)
812 
814 
815 /**
816  * @brief Statically define and initialize an RTIO context
817  *
818  * @param name Name of the RTIO
819  * @param sq_sz Size of the submission queue entry pool
820  * @param cq_sz Size of the completion queue entry pool
821  * @param num_blks Number of blocks in the memory pool
822  * @param blk_size The number of bytes in each block
823  * @param balign The block alignment
824  */
825 #define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
826 	Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz);		\
827 	Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz);			\
828 	Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
829 	Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
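/*
 * A sketch of the mempool flow, assuming CONFIG_RTIO_SYS_MEM_BLOCKS=y and a
 * hypothetical sensor_iodev: the context owns the buffer, so it is fetched
 * from the CQE after completion and handed back to the pool when done.
 *
 *   RTIO_DEFINE_WITH_MEMPOOL(m_io, 4, 4, 16, 32, 4);  // 16 blocks of 32 bytes
 *
 *   void pool_read(void)
 *   {
 *           struct rtio_sqe *sqe = rtio_sqe_acquire(&m_io);
 *
 *           rtio_sqe_prep_read_with_pool(sqe, &sensor_iodev, RTIO_PRIO_NORM, NULL);
 *           rtio_submit(&m_io, 1);
 *
 *           struct rtio_cqe *cqe = rtio_cqe_consume_block(&m_io);
 *           uint8_t *buf;
 *           uint32_t buf_len;
 *
 *           if (rtio_cqe_get_mempool_buffer(&m_io, cqe, &buf, &buf_len) == 0) {
 *                   // use buf[0..buf_len), then return it to the pool
 *                   rtio_release_buffer(&m_io, buf, buf_len);
 *           }
 *           rtio_cqe_release(&m_io, cqe);
 *   }
 */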
830 
831 /* clang-format on */
832 
833 /**
834  * @brief Count of acquirable submission queue events
835  *
836  * @param r RTIO context
837  *
838  * @return Count of acquirable submission queue events
839  */
840 static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
841 {
842 	return r->sqe_pool->pool_free;
843 }
844 
845 /**
846  * @brief Get the next sqe in the transaction
847  *
848  * @param iodev_sqe Submission queue entry
849  *
850  * @retval NULL if current sqe is last in transaction
851  * @retval struct rtio_sqe * if available
852  */
853 static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
854 {
855 	if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
856 		return iodev_sqe->next;
857 	} else {
858 		return NULL;
859 	}
860 }
861 
862 
863 /**
864  * @brief Get the next sqe in the chain
865  *
866  * @param iodev_sqe Submission queue entry
867  *
868  * @retval NULL if current sqe is last in chain
869  * @retval struct rtio_sqe * if available
870  */
871 static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
872 {
873 	if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
874 		return iodev_sqe->next;
875 	} else {
876 		return NULL;
877 	}
878 }
879 
880 /**
881  * @brief Get the next sqe in the chain or transaction
882  *
883  * @param iodev_sqe Submission queue entry
884  *
885  * @retval NULL if current sqe is last in chain
886  * @retval struct rtio_iodev_sqe * if available
887  */
888 static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
889 {
890 	return iodev_sqe->next;
891 }
892 
893 /**
894  * @brief Acquire a single submission queue event if available
895  *
896  * @param r RTIO context
897  *
898  * @retval sqe A valid submission queue event acquired from the submission queue
899  * @retval NULL No submission queue event available
900  */
901 static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
902 {
903 	struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
904 
905 	if (iodev_sqe == NULL) {
906 		return NULL;
907 	}
908 
909 	mpsc_push(&r->sq, &iodev_sqe->q);
910 
911 	return &iodev_sqe->sqe;
912 }
913 
914 /**
915  * @brief Drop all previously acquired sqe
916  *
917  * @param r RTIO context
918  */
919 static inline void rtio_sqe_drop_all(struct rtio *r)
920 {
921 	struct rtio_iodev_sqe *iodev_sqe;
922 	struct mpsc_node *node = mpsc_pop(&r->sq);
923 
924 	while (node != NULL) {
925 		iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
926 		rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
927 		node = mpsc_pop(&r->sq);
928 	}
929 }
930 
931 /**
932  * @brief Acquire a completion queue event if available
933  */
934 static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
935 {
936 	struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
937 
938 	if (cqe == NULL) {
939 		return NULL;
940 	}
941 
942 	memset(cqe, 0, sizeof(struct rtio_cqe));
943 
944 	return cqe;
945 }
946 
947 /**
948  * @brief Produce a completion queue event
949  */
950 static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
951 {
952 	mpsc_push(&r->cq, &cqe->q);
953 }
954 
955 /**
956  * @brief Consume a single completion queue event if available
957  *
958  * If a completion queue event is returned, rtio_cqe_release() must be called
959  * at some point to release the cqe spot for the cqe producer.
960  *
961  * @param r RTIO context
962  *
963  * @retval cqe A valid completion queue event consumed from the completion queue
964  * @retval NULL No completion queue event available
965  */
966 static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
967 {
968 	struct mpsc_node *node;
969 	struct rtio_cqe *cqe = NULL;
970 
971 #ifdef CONFIG_RTIO_CONSUME_SEM
972 	if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
973 		return NULL;
974 	}
975 #endif
976 
977 	node = mpsc_pop(&r->cq);
978 	if (node == NULL) {
979 		return NULL;
980 	}
981 	cqe = CONTAINER_OF(node, struct rtio_cqe, q);
982 
983 	return cqe;
984 }
985 
986 /**
987  * @brief Wait for and consume a single completion queue event
988  *
989  * If a completion queue event is returned, rtio_cqe_release() must be called
990  * at some point to release the cqe spot for the cqe producer.
991  *
992  * @param r RTIO context
993  *
994  * @retval cqe A valid completion queue event consumed from the completion queue
995  */
996 static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
997 {
998 	struct mpsc_node *node;
999 	struct rtio_cqe *cqe;
1000 
1001 #ifdef CONFIG_RTIO_CONSUME_SEM
1002 	k_sem_take(r->consume_sem, K_FOREVER);
1003 #endif
1004 	node = mpsc_pop(&r->cq);
1005 	while (node == NULL) {
1006 		Z_SPIN_DELAY(1);
1007 		node = mpsc_pop(&r->cq);
1008 	}
1009 	cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1010 
1011 	return cqe;
1012 }
1013 
1014 /**
1015  * @brief Release consumed completion queue event
1016  *
1017  * @param r RTIO context
1018  * @param cqe Completion queue entry
1019  */
1020 static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1021 {
1022 	rtio_cqe_pool_free(r->cqe_pool, cqe);
1023 }
1024 
1025 /**
1026  * @brief Compute the CQE flags from the rtio_iodev_sqe entry
1027  *
1028  * @param iodev_sqe The SQE entry in question.
1029  * @return The value that should be set for the CQE's flags field.
1030  */
1031 static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1032 {
1033 	uint32_t flags = 0;
1034 
1035 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1036 	if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1037 		struct rtio *r = iodev_sqe->r;
1038 		struct sys_mem_blocks *mem_pool = r->block_pool;
1039 		int blk_index = (iodev_sqe->sqe.buf - mem_pool->buffer) >>
1040 				mem_pool->info.blk_sz_shift;
1041 		int blk_count = iodev_sqe->sqe.buf_len >> mem_pool->info.blk_sz_shift;
1042 
1043 		flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1044 	}
1045 #else
1046 	ARG_UNUSED(iodev_sqe);
1047 #endif
1048 
1049 	return flags;
1050 }
1051 
1052 /**
1053  * @brief Retrieve the mempool buffer that was allocated for the CQE.
1054  *
1055  * If the RTIO context contains a memory pool, and the SQE was created by calling
1056  * rtio_sqe_prep_read_with_pool(), this function can be used to retrieve the memory associated with the
1057  * read. Once processing is done, it should be released by calling rtio_release_buffer().
1058  *
1059  * @param[in] r RTIO context
1060  * @param[in] cqe The CQE handling the event.
1061  * @param[out] buff Pointer to the mempool buffer
1062  * @param[out] buff_len Length of the allocated buffer
1063  * @return 0 on success
1064  * @return -EINVAL if the buffer wasn't allocated for this cqe
1065  * @return -ENOTSUP if memory blocks are disabled
1066  */
1067 __syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1068 					  uint8_t **buff, uint32_t *buff_len);
1069 
1070 static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1071 						     uint8_t **buff, uint32_t *buff_len)
1072 {
1073 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1074 	if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1075 		int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1076 		int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1077 		uint32_t blk_size = rtio_mempool_block_size(r);
1078 
1079 		*buff = r->block_pool->buffer + blk_idx * blk_size;
1080 		*buff_len = blk_count * blk_size;
1081 		__ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1082 		__ASSERT_NO_MSG(*buff <
1083 				r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1084 		return 0;
1085 	}
1086 	return -EINVAL;
1087 #else
1088 	ARG_UNUSED(r);
1089 	ARG_UNUSED(cqe);
1090 	ARG_UNUSED(buff);
1091 	ARG_UNUSED(buff_len);
1092 
1093 	return -ENOTSUP;
1094 #endif
1095 }
1096 
1097 void rtio_executor_submit(struct rtio *r);
1098 void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1099 void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1100 
1101 /**
1102  * @brief Inform the executor of a submission completion with success
1103  *
1104  * This may start the next asynchronous request if one is available.
1105  *
1106  * @param iodev_sqe IODev Submission that has succeeded
1107  * @param result Result of the request
1108  */
1109 static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1110 {
1111 	rtio_executor_ok(iodev_sqe, result);
1112 }
1113 
1114 /**
1115  * @brief Inform the executor of a submission's completion with error
1116  *
1117  * This SHALL fail the remaining submissions in the chain.
1118  *
1119  * @param iodev_sqe Submission that has failed
1120  * @param result Result of the request
1121  */
1122 static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1123 {
1124 	rtio_executor_err(iodev_sqe, result);
1125 }
1126 
1127 /**
1128  * Submit a completion queue event with a given result and userdata
1129  *
1130  * Called by the executor to produce a completion queue event; no inherent
1131  * locking is performed and this is not safe to do from multiple callers.
1132  *
1133  * @param r RTIO context
1134  * @param result Integer result code (could be -errno)
1135  * @param userdata Userdata to pass along to completion
1136  * @param flags Flags to use for the CQE, see RTIO_CQE_FLAG_*
1137  */
1138 static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1139 {
1140 	struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1141 
1142 	if (cqe == NULL) {
1143 		atomic_inc(&r->xcqcnt);
1144 	} else {
1145 		cqe->result = result;
1146 		cqe->userdata = userdata;
1147 		cqe->flags = flags;
1148 		rtio_cqe_produce(r, cqe);
1149 	}
1150 
1151 	atomic_inc(&r->cq_count);
1152 #ifdef CONFIG_RTIO_SUBMIT_SEM
1153 	if (r->submit_count > 0) {
1154 		r->submit_count--;
1155 		if (r->submit_count == 0) {
1156 			k_sem_give(r->submit_sem);
1157 		}
1158 	}
1159 #endif
1160 #ifdef CONFIG_RTIO_CONSUME_SEM
1161 	k_sem_give(r->consume_sem);
1162 #endif
1163 }
1164 
1165 #define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1166 
1167 /**
1168  * @brief Get the buffer associated with the RX submission
1169  *
1170  * @param[in] iodev_sqe   The submission to probe
1171  * @param[in] min_buf_len The minimum number of bytes needed for the operation
1172  * @param[in] max_buf_len The maximum number of bytes needed for the operation
1173  * @param[out] buf        Where to store the pointer to the buffer
1174  * @param[out] buf_len    Where to store the size of the buffer
1175  *
1176  * @return 0 if @p buf and @p buf_len were successfully filled
1177  * @return -ENOMEM Not enough memory for @p min_buf_len
1178  */
1179 static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1180 				  uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1181 {
1182 	struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1183 
1184 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1185 	if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1186 		struct rtio *r = iodev_sqe->r;
1187 
1188 		if (sqe->buf != NULL) {
1189 			if (sqe->buf_len < min_buf_len) {
1190 				return -ENOMEM;
1191 			}
1192 			*buf = sqe->buf;
1193 			*buf_len = sqe->buf_len;
1194 			return 0;
1195 		}
1196 
1197 		int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1198 		if (rc == 0) {
1199 			sqe->buf = *buf;
1200 			sqe->buf_len = *buf_len;
1201 			return 0;
1202 		}
1203 
1204 		return -ENOMEM;
1205 	}
1206 #else
1207 	ARG_UNUSED(max_buf_len);
1208 #endif
1209 
1210 	if (sqe->buf_len < min_buf_len) {
1211 		return -ENOMEM;
1212 	}
1213 
1214 	*buf = sqe->buf;
1215 	*buf_len = sqe->buf_len;
1216 	return 0;
1217 }
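/*
 * A sketch of how an iodev's submit path might obtain the RX buffer, covering
 * both caller-provided and mempool-backed reads; MIN_LEN/MAX_LEN and the
 * function name are hypothetical:
 *
 *   static void my_sensor_submit(struct rtio_iodev_sqe *iodev_sqe)
 *   {
 *           uint8_t *buf;
 *           uint32_t buf_len;
 *           int rc = rtio_sqe_rx_buf(iodev_sqe, MIN_LEN, MAX_LEN, &buf, &buf_len);
 *
 *           if (rc != 0) {
 *                   rtio_iodev_sqe_err(iodev_sqe, rc);
 *                   return;
 *           }
 *
 *           // start the transfer into buf, then complete later with
 *           // rtio_iodev_sqe_ok(iodev_sqe, number_of_bytes_read);
 *   }
 */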
1218 
1219 /**
1220  * @brief Release memory that was allocated by the RTIO's memory pool
1221  *
1222  * If the RTIO context was created by a call to RTIO_DEFINE_WITH_MEMPOOL(), then the cqe data might
1223  * contain a buffer that's owned by the RTIO context. In those cases (if the read request was
1224  * configured via rtio_sqe_prep_read_with_pool()), the buffer must be returned to the pool.
1225  *
1226  * Call this function when processing is complete. This function will validate that the memory
1227  * actually belongs to the RTIO context and will ignore invalid arguments.
1228  *
1229  * @param r RTIO context
1230  * @param buff Pointer to the buffer to be released.
1231  * @param buff_len Number of bytes to free (will be rounded up to nearest memory block).
1232  */
1233 __syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1234 
1235 static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1236 {
1237 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1238 	if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1239 		return;
1240 	}
1241 
1242 	rtio_block_pool_free(r, buff, buff_len);
1243 #else
1244 	ARG_UNUSED(r);
1245 	ARG_UNUSED(buff);
1246 	ARG_UNUSED(buff_len);
1247 #endif
1248 }
1249 
1250 /**
1251  * Grant access to an RTIO context to a user thread
1252  */
1253 static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1254 {
1255 	k_object_access_grant(r, t);
1256 
1257 #ifdef CONFIG_RTIO_SUBMIT_SEM
1258 	k_object_access_grant(r->submit_sem, t);
1259 #endif
1260 
1261 #ifdef CONFIG_RTIO_CONSUME_SEM
1262 	k_object_access_grant(r->consume_sem, t);
1263 #endif
1264 }
1265 
1266 /**
1267  * @brief Attempt to cancel an SQE
1268  *
1269  * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
1270  * result.
1271  *
1272  * @param[in] sqe The SQE to cancel
1273  * @return 0 if the SQE was flagged for cancellation
1274  * @return <0 on error
1275  */
1276 __syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1277 
1278 static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1279 {
1280 	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1281 
1282 	do {
1283 		iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1284 		iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1285 	} while (iodev_sqe != NULL);
1286 
1287 	return 0;
1288 }
1289 
1290 /**
1291  * @brief Copy an array of SQEs into the queue and get resulting handles back
1292  *
1293  * Copies one or more SQEs into the RTIO context and optionally returns their generated SQE handles.
1294  * Handles can be used to cancel events via the rtio_sqe_cancel() call.
1295  *
1296  * @param[in]  r RTIO context
1297  * @param[in]  sqes Pointer to an array of SQEs
1298  * @param[out] handle Optional pointer to @ref rtio_sqe pointer to store the handle of the
1299  *             first generated SQE. Use NULL to ignore.
1300  * @param[in]  sqe_count Count of sqes in array
1301  *
1302  * @retval 0 success
1303  * @retval -ENOMEM not enough room in the queue
1304  */
1305 __syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1306 					   struct rtio_sqe **handle, size_t sqe_count);
1307 
1308 static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1309 						      struct rtio_sqe **handle,
1310 						      size_t sqe_count)
1311 {
1312 	struct rtio_sqe *sqe;
1313 	uint32_t acquirable = rtio_sqe_acquirable(r);
1314 
1315 	if (acquirable < sqe_count) {
1316 		return -ENOMEM;
1317 	}
1318 
1319 	for (unsigned long i = 0; i < sqe_count; i++) {
1320 		sqe = rtio_sqe_acquire(r);
1321 		__ASSERT_NO_MSG(sqe != NULL);
1322 		if (handle != NULL && i == 0) {
1323 			*handle = sqe;
1324 		}
1325 		*sqe = sqes[i];
1326 	}
1327 
1328 	return 0;
1329 }
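/*
 * A sketch of copying an SQE in from user mode and canceling it later,
 * assuming a hypothetical sensor_iodev and a context m_io the thread was
 * granted access to (see rtio_access_grant()):
 *
 *   struct rtio_sqe sqe;
 *   struct rtio_sqe *handle;
 *
 *   rtio_sqe_prep_read_with_pool(&sqe, &sensor_iodev, RTIO_PRIO_NORM, NULL);
 *   sqe.flags |= RTIO_SQE_MULTISHOT;
 *
 *   if (rtio_sqe_copy_in_get_handles(&m_io, &sqe, &handle, 1) == 0) {
 *           rtio_submit(&m_io, 0);
 *           // ... later, stop the multishot read via its handle
 *           rtio_sqe_cancel(handle);
 *   }
 */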
1330 
1331 /**
1332  * @brief Copy an array of SQEs into the queue
1333  *
1334  * Useful if a batch of submissions is stored in ROM or
1335  * RTIO is used from user mode where a copy must be made.
1336  *
1337  * Partial copying is not done as chained SQEs need to be submitted
1338  * as a whole set.
1339  *
1340  * @param r RTIO context
1341  * @param sqes Pointer to an array of SQEs
1342  * @param sqe_count Count of sqes in array
1343  *
1344  * @retval 0 success
1345  * @retval -ENOMEM not enough room in the queue
1346  */
1347 static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1348 {
1349 	return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1350 }
1351 
1352 /**
1353  * @brief Copy an array of CQEs from the queue
1354  *
1355  * Copies completion queue events out of the RTIO context's completion
1356  * queue, waiting up to the given time period to gather the number
1357  * of completions requested.
1358  *
1359  * @param r RTIO context
1360  * @param cqes Pointer to an array of CQEs
1361  * @param cqe_count Count of CQEs in the array
1362  * @param timeout Timeout to wait for each completion event. Total wait time is
1363  *                potentially timeout*cqe_count at maximum.
1364  *
1365  * @retval copy_count Count of copied CQEs (0 to cqe_count)
1366  */
1367 __syscall int rtio_cqe_copy_out(struct rtio *r,
1368 				struct rtio_cqe *cqes,
1369 				size_t cqe_count,
1370 				k_timeout_t timeout);
1371 static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1372 					   struct rtio_cqe *cqes,
1373 					   size_t cqe_count,
1374 					   k_timeout_t timeout)
1375 {
1376 	size_t copied = 0;
1377 	struct rtio_cqe *cqe;
1378 	k_timepoint_t end = sys_timepoint_calc(timeout);
1379 
1380 	do {
1381 		cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1382 						       : rtio_cqe_consume(r);
1383 		if (cqe == NULL) {
1384 #ifdef CONFIG_BOARD_NATIVE_POSIX
1385 			/* Native posix fakes the clock and only moves it forward when sleeping. */
1386 			k_sleep(K_TICKS(1));
1387 #else
1388 			Z_SPIN_DELAY(1);
1389 #endif
1390 			continue;
1391 		}
1392 		cqes[copied++] = *cqe;
1393 		rtio_cqe_release(r, cqe);
1394 	} while (copied < cqe_count && !sys_timepoint_expired(end));
1395 
1396 	return copied;
1397 }
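/*
 * A sketch of draining completions in a batch, e.g. from user mode; the
 * return value is the number of CQEs actually copied within the timeout,
 * and m_io is a hypothetical context:
 *
 *   struct rtio_cqe cqes[4];
 *
 *   int n = rtio_cqe_copy_out(&m_io, cqes, ARRAY_SIZE(cqes), K_MSEC(100));
 *
 *   for (int i = 0; i < n; i++) {
 *           // cqes[i].result and cqes[i].userdata describe each completion
 *   }
 */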
1398 
1399 /**
1400  * @brief Submit I/O requests to the underlying executor
1401  *
1402  * Submits the queue of submission queue events to the executor.
1403  * The executor will do the work of managing tasks representing each
1404  * submission chain, freeing submission queue events when done, and
1405  * producing completion queue events as submissions are completed.
1406  *
1407  * @param r RTIO context
1408  * @param wait_count Number of submissions to wait for completion of.
1409  *
1410  * @retval 0 On success
1411  */
1412 __syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1413 
1414 static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1415 {
1416 	int res = 0;
1417 
1418 #ifdef CONFIG_RTIO_SUBMIT_SEM
1419 	/* TODO undefined behavior if another thread calls submit of course
1420 	 */
1421 	if (wait_count > 0) {
1422 		__ASSERT(!k_is_in_isr(),
1423 			 "expected rtio submit with wait count to be called from a thread");
1424 
1425 		k_sem_reset(r->submit_sem);
1426 		r->submit_count = wait_count;
1427 	}
1428 #else
1429 	uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
1430 #endif
1431 
1432 	/* Submit the queue to the executor which consumes submissions
1433 	 * and produces completions through ISR chains or other means.
1434 	 */
1435 	rtio_executor_submit(r);
1436 
1437 
1438 	/* TODO could be nicer if we could suspend the thread and not
1439 	 * wake up on each completion here.
1440 	 */
1441 #ifdef CONFIG_RTIO_SUBMIT_SEM
1442 
1443 	if (wait_count > 0) {
1444 		res = k_sem_take(r->submit_sem, K_FOREVER);
1445 		__ASSERT(res == 0,
1446 			 "semaphore was reset or timed out while waiting on completions!");
1447 	}
1448 #else
1449 	while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
1450 		Z_SPIN_DELAY(10);
1451 		k_yield();
1452 	}
1453 #endif
1454 
1455 	return res;
1456 }
1457 
1458 /**
1459  * @}
1460  */
1461 
1462 #ifdef __cplusplus
1463 }
1464 #endif
1465 
1466 #include <zephyr/syscalls/rtio.h>
1467 
1468 #endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
1469