1 /*
2  * Copyright (c) 2022 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Real-Time IO device API for moving bytes with low effort
10  *
11  * RTIO is a context for asynchronous batch operations using a submission and completion queue.
12  *
13  * Asynchronous I/O operations are set up in a submission queue. Each entry in the queue describes
14  * the operation it wishes to perform with some understood semantics.
15  *
16  * These operations may be chained in such a way that the next operation will be
17  * executed only once the current one completes. If the current operation fails,
18  * all chained operations will also fail.
19  *
20  * Operations may also be submitted as a transaction where a set of operations are considered
21  * to be one operation.
22  *
23  * The completion of these operations typically provides one or more completion queue events.
24  */
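/*
 * Illustrative usage sketch (not part of the API): a minimal round trip through
 * an RTIO context. The iodev "my_iodev" is a hypothetical device assumed to be
 * defined elsewhere with RTIO_IODEV_DEFINE().
 *
 *   RTIO_DEFINE(my_rtio, 4, 4);
 *
 *   void example(void)
 *   {
 *           struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *           if (sqe == NULL) {
 *                   return; // no submission slots free
 *           }
 *
 *           rtio_sqe_prep_nop(sqe, &my_iodev, NULL);
 *           rtio_submit(&my_rtio, 1); // block until one completion is produced
 *
 *           struct rtio_cqe *cqe = rtio_cqe_consume(&my_rtio);
 *
 *           if (cqe != NULL) {
 *                   // cqe->result and cqe->userdata describe the completed op
 *                   rtio_cqe_release(&my_rtio, cqe);
 *           }
 *   }
 */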
25 
26 #ifndef ZEPHYR_INCLUDE_RTIO_RTIO_H_
27 #define ZEPHYR_INCLUDE_RTIO_RTIO_H_
28 
29 #include <string.h>
30 
31 #include <zephyr/app_memory/app_memdomain.h>
32 #include <zephyr/device.h>
33 #include <zephyr/kernel.h>
34 #include <zephyr/sys/__assert.h>
35 #include <zephyr/sys/atomic.h>
36 #include <zephyr/sys/mem_blocks.h>
37 #include <zephyr/sys/util.h>
38 #include <zephyr/sys/iterable_sections.h>
39 #include <zephyr/sys/mpsc_lockfree.h>
40 
41 #ifdef __cplusplus
42 extern "C" {
43 #endif
44 
45 
46 /**
47  * @brief RTIO
48  * @defgroup rtio RTIO
49  * @since 3.2
50  * @version 0.1.0
51  * @ingroup os_services
52  * @{
53  */
54 
55 /**
56  * @brief RTIO Predefined Priorities
57  * @defgroup rtio_sqe_prio RTIO Priorities
58  * @ingroup rtio
59  * @{
60  */
61 
62 /**
63  * @brief Low priority
64  */
65 #define RTIO_PRIO_LOW 0U
66 
67 /**
68  * @brief Normal priority
69  */
70 #define RTIO_PRIO_NORM 127U
71 
72 /**
73  * @brief High priority
74  */
75 #define RTIO_PRIO_HIGH 255U
76 
77 /**
78  * @}
79  */
80 
81 
82 /**
83  * @brief RTIO SQE Flags
84  * @defgroup rtio_sqe_flags RTIO SQE Flags
85  * @ingroup rtio
86  * @{
87  */
88 
89 /**
90  * @brief The next request in the queue should wait on this one.
91  *
92  * Chained SQEs are individual units of work describing patterns of
93  * ordering and failure cascading. A chained SQE must be started only
94  * after the one before it. They are given to the iodevs one after another.
95  */
96 #define RTIO_SQE_CHAINED BIT(0)
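/*
 * Illustrative sketch (hypothetical iodev, context and buffers): a write chained
 * to a read. The read is started only after the write completes, and fails if
 * the write fails.
 *
 *   struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 *   struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_write(wr, &my_iodev, RTIO_PRIO_NORM, tx_buf, tx_len, NULL);
 *   wr->flags |= RTIO_SQE_CHAINED; // the prep helpers clear flags, so set after
 *   rtio_sqe_prep_read(rd, &my_iodev, RTIO_PRIO_NORM, rx_buf, rx_len, userdata);
 *   rtio_submit(&my_rtio, 2); // two SQEs, two completions
 */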
97 
98 /**
99  * @brief The next request in the queue is part of a transaction.
100  *
101  * Transactional SQEs are sequential parts of a unit of work.
102  * Only the first transactional SQE is submitted to an iodev, the
103  * remaining SQEs are never individually submitted but instead considered
104  * to be part of the transaction to the single iodev. The first sqe in the
105  * sequence holds the iodev that will be used and the last holds the userdata
106  * that will be returned in a single completion on failure/success.
107  */
108 #define RTIO_SQE_TRANSACTION BIT(1)
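/*
 * Illustrative sketch (hypothetical iodev and buffers): two writes submitted as
 * a single transaction. The first SQE names the iodev, the last carries the
 * userdata, and a single completion is produced for the whole set.
 *
 *   struct rtio_sqe *hdr = rtio_sqe_acquire(&my_rtio);
 *   struct rtio_sqe *payload = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_write(hdr, &my_iodev, RTIO_PRIO_NORM, hdr_buf, hdr_len, NULL);
 *   hdr->flags |= RTIO_SQE_TRANSACTION;
 *   rtio_sqe_prep_write(payload, &my_iodev, RTIO_PRIO_NORM, data_buf, data_len, userdata);
 *   rtio_submit(&my_rtio, 1); // one completion for the whole transaction
 */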
109 
110 
111 /**
112  * @brief The buffer should be allocated by the RTIO mempool
113  *
114  * This flag can only be used if the CONFIG_RTIO_SYS_MEM_BLOCKS Kconfig option was
115  * enabled and the RTIO context was created via the RTIO_DEFINE_WITH_MEMPOOL()
116  * macro. If set, the buffer associated with the entry was allocated by the
117  * internal memory pool and should be released as soon as it is no longer
118  * needed via a call to rtio_release_buffer().
119  */
120 #define RTIO_SQE_MEMPOOL_BUFFER BIT(2)
121 
122 /**
123  * @brief The SQE should not execute if possible
124  *
125  * If possible (not yet executed), the SQE should be canceled by flagging it as failed and returning
126  * -ECANCELED as the result.
127  */
128 #define RTIO_SQE_CANCELED BIT(3)
129 
130 /**
131  * @brief The SQE should continue producing CQEs until canceled
132  *
133  * This flag must be used along with @ref RTIO_SQE_MEMPOOL_BUFFER and signals that when a read
134  * is complete it should be placed back in the queue until canceled.
135  */
136 #define RTIO_SQE_MULTISHOT BIT(4)
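/*
 * Illustrative sketch (mempool-backed context and hypothetical iodev): a
 * multishot read keeps producing completions, each with a pool-allocated
 * buffer, until it is canceled through its handle.
 *
 *   struct rtio_sqe sqe;
 *   struct rtio_sqe *handle;
 *
 *   rtio_sqe_prep_read_multishot(&sqe, &my_iodev, RTIO_PRIO_NORM, userdata);
 *   rtio_sqe_copy_in_get_handles(&my_rtio, &sqe, &handle, 1);
 *   rtio_submit(&my_rtio, 0);
 *   // ... consume CQEs as they arrive, then stop the stream
 *   rtio_sqe_cancel(handle);
 */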
137 
138 /**
139  * @brief The SQE does not produce a CQE.
140  */
141 #define RTIO_SQE_NO_RESPONSE BIT(5)
142 
143 /**
144  * @}
145  */
146 
147 /**
148  * @brief RTIO CQE Flags
149  * @defgroup rtio_cqe_flags RTIO CQE Flags
150  * @ingroup rtio
151  * @{
152  */
153 
154 /**
155  * @brief The entry's buffer was allocated from the RTIO's mempool
156  *
157  * If this bit is set, the buffer was allocated from the memory pool and should be recycled as
158  * soon as the application is done with it.
159  */
160 #define RTIO_CQE_FLAG_MEMPOOL_BUFFER BIT(0)
161 
/**
 * @brief Get the flag bits of a CQE flags value
 *
 * @param flags The CQE flags value
 * @return The flag bits (lower byte) of the flags field.
 */
162 #define RTIO_CQE_FLAG_GET(flags) FIELD_GET(GENMASK(7, 0), (flags))
163 
164 /**
165  * @brief Get the block index from a CQE's mempool flags
166  *
167  * @param flags The CQE flags value
168  * @return The block index portion of the flags field.
169  */
170 #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(flags) FIELD_GET(GENMASK(19, 8), (flags))
171 
172 /**
173  * @brief Get the block count from a CQE's mempool flags
174  *
175  * @param flags The CQE flags value
176  * @return The block count portion of the flags field.
177  */
178 #define RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(flags) FIELD_GET(GENMASK(31, 20), (flags))
179 
180 /**
181  * @brief Prepare CQE flags for a mempool read.
182  *
183  * @param blk_idx The mempool block index
184  * @param blk_cnt The mempool block count
185  * @return A shifted and masked value that can be added to the flags field with an OR operator.
186  */
187 #define RTIO_CQE_FLAG_PREP_MEMPOOL(blk_idx, blk_cnt)                                               \
188 	(FIELD_PREP(GENMASK(7, 0), RTIO_CQE_FLAG_MEMPOOL_BUFFER) |                                 \
189 	 FIELD_PREP(GENMASK(19, 8), blk_idx) | FIELD_PREP(GENMASK(31, 20), blk_cnt))
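/*
 * Illustrative sketch (assuming a consumed CQE pointer "cqe"): decoding the
 * mempool information carried in the flags field with the helpers above. Most
 * applications can use rtio_cqe_get_mempool_buffer() instead of decoding by hand.
 *
 *   if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
 *           uint32_t blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
 *           uint32_t blk_cnt = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
 *
 *           // blk_idx and blk_cnt locate the buffer in the context's block pool
 *   }
 */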
190 
191 /**
192  * @}
193  */
194 
195 /**
196  * @brief Equivalent to the I2C_MSG_STOP flag
197  */
198 #define RTIO_IODEV_I2C_STOP BIT(1)
199 
200 /**
201  * @brief Equivalent to the I2C_MSG_RESTART flag
202  */
203 #define RTIO_IODEV_I2C_RESTART BIT(2)
204 
205 /**
206  * @brief Equivalent to the I2C_MSG_ADDR_10_BITS flag
207  */
208 #define RTIO_IODEV_I2C_10_BITS BIT(3)
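/*
 * Illustrative sketch (hypothetical I2C iodev and an already-acquired sqe):
 * these values are placed in the iodev_flags field of an SQE, e.g. to issue a
 * repeated start before a read and a stop once it finishes.
 *
 *   rtio_sqe_prep_read(sqe, &i2c_iodev, RTIO_PRIO_NORM, rx_buf, rx_len, NULL);
 *   sqe->iodev_flags = RTIO_IODEV_I2C_RESTART | RTIO_IODEV_I2C_STOP;
 */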
209 
210 /**
211  * @brief Equivalent to the I3C_MSG_STOP flag
212  */
213 #define RTIO_IODEV_I3C_STOP BIT(1)
214 
215 /**
216  * @brief Equivalent to the I3C_MSG_RESTART flag
217  */
218 #define RTIO_IODEV_I3C_RESTART BIT(2)
219 
220 /**
221  * @brief Equivalent to the I3C_MSG_HDR flag
222  */
223 #define RTIO_IODEV_I3C_HDR BIT(3)
224 
225 /**
226  * @brief Equivalent to the I3C_MSG_NBCH flag
227  */
228 #define RTIO_IODEV_I3C_NBCH BIT(4)
229 
230 /**
231  * @brief I3C HDR Mode Mask
232  */
233 #define RTIO_IODEV_I3C_HDR_MODE_MASK GENMASK(15, 8)
234 
235 /**
236  * @brief Set the I3C HDR Mode
237  */
238 #define RTIO_IODEV_I3C_HDR_MODE_SET(flags) \
239 	FIELD_PREP(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
240 
241 /**
242  * @brief Get the I3C HDR Mode
243  */
244 #define RTIO_IODEV_I3C_HDR_MODE_GET(flags) \
245 	FIELD_GET(RTIO_IODEV_I3C_HDR_MODE_MASK, flags)
246 
247 /**
248  * @brief I3C HDR 7b Command Code
249  */
250 #define RTIO_IODEV_I3C_HDR_CMD_CODE_MASK GENMASK(22, 16)
251 
252 /**
253  * @brief Set the I3C HDR 7b Command Code
254  */
255 #define RTIO_IODEV_I3C_HDR_CMD_CODE_SET(flags) \
256 	FIELD_PREP(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
257 
258 /**
259  * @brief Get the I3C HDR 7b Command Code
260  */
261 #define RTIO_IODEV_I3C_HDR_CMD_CODE_GET(flags) \
262 	FIELD_GET(RTIO_IODEV_I3C_HDR_CMD_CODE_MASK, flags)
263 
264 /** @cond ignore */
265 struct rtio;
266 struct rtio_cqe;
267 struct rtio_sqe;
268 struct rtio_sqe_pool;
269 struct rtio_cqe_pool;
270 struct rtio_iodev;
271 struct rtio_iodev_sqe;
272 /** @endcond */
273 
274 /**
275  * @typedef rtio_callback_t
276  * @brief Callback signature for RTIO_OP_CALLBACK
277  * @param r RTIO context being used with the callback
278  * @param sqe Submission for the callback op
279  * @param arg0 Argument passed as part of the sqe
280  */
281 typedef void (*rtio_callback_t)(struct rtio *r, const struct rtio_sqe *sqe, void *arg0);
282 
283 /**
284  * @brief A submission queue event
285  */
286 struct rtio_sqe {
287 	uint8_t op; /**< Op code */
288 
289 	uint8_t prio; /**< Op priority */
290 
291 	uint16_t flags; /**< Op Flags */
292 
293 	uint32_t iodev_flags; /**< Op iodev flags */
294 
295 	const struct rtio_iodev *iodev; /**< Device to operate on */
296 
297 	/**
298 	 * User provided data which is returned upon operation completion. Could be a pointer or
299 	 * integer.
300 	 *
301 	 * If unique identification of completions is desired this should be
302 	 * unique as well.
303 	 */
304 	void *userdata;
305 
306 	union {
307 
308 		/** OP_TX */
309 		struct {
310 			uint32_t buf_len; /**< Length of buffer */
311 			const uint8_t *buf; /**< Buffer to write from */
312 		} tx;
313 
314 		/** OP_RX */
315 		struct {
316 			uint32_t buf_len; /**< Length of buffer */
317 			uint8_t *buf; /**< Buffer to read into */
318 		} rx;
319 
320 		/** OP_TINY_TX */
321 		struct {
322 			uint8_t buf_len; /**< Length of tiny buffer */
323 			uint8_t buf[7]; /**< Tiny buffer */
324 		} tiny_tx;
325 
326 		/** OP_CALLBACK */
327 		struct {
328 			rtio_callback_t callback;
329 			void *arg0; /**< Last argument given to callback */
330 		} callback;
331 
332 		/** OP_TXRX */
333 		struct {
334 			uint32_t buf_len; /**< Length of tx and rx buffers */
335 			const uint8_t *tx_buf; /**< Buffer to write from */
336 			uint8_t *rx_buf; /**< Buffer to read into */
337 		} txrx;
338 
339 		/** OP_I2C_CONFIGURE */
340 		uint32_t i2c_config;
341 
342 		/** OP_I3C_CONFIGURE */
343 		struct {
344 			/* enum i3c_config_type type; */
345 			int type;
346 			void *config;
347 		} i3c_config;
348 
349 		/** OP_I3C_CCC */
350 		/* struct i3c_ccc_payload *ccc_payload; */
351 		void *ccc_payload;
352 	};
353 };
354 
355 /** @cond ignore */
356 /* Ensure the rtio_sqe never grows beyond a common cacheline size of 64 bytes */
357 BUILD_ASSERT(sizeof(struct rtio_sqe) <= 64);
358 /** @endcond */
359 
360 /**
361  * @brief A completion queue event
362  */
363 struct rtio_cqe {
364 	struct mpsc_node q;
365 
366 	int32_t result; /**< Result from operation */
367 	void *userdata; /**< Associated userdata with operation */
368 	uint32_t flags; /**< Flags associated with the operation */
369 };
370 
371 struct rtio_sqe_pool {
372 	struct mpsc free_q;
373 	const uint16_t pool_size;
374 	uint16_t pool_free;
375 	struct rtio_iodev_sqe *pool;
376 };
377 
378 struct rtio_cqe_pool {
379 	struct mpsc free_q;
380 	const uint16_t pool_size;
381 	uint16_t pool_free;
382 	struct rtio_cqe *pool;
383 };
384 
385 /**
386  * @brief An RTIO context containing what can be viewed as a pair of queues.
387  *
388  * A queue for submissions (available and in queue to be produced) as well as a queue
389  * of completions (available and ready to be consumed).
390  *
391  * The rtio executor along with any objects implementing the rtio_iodev interface are
392  * the consumers of submissions and producers of completions.
393  *
394  * No work is started until rtio_submit() is called.
395  */
396 struct rtio {
397 #ifdef CONFIG_RTIO_SUBMIT_SEM
398 	/* A wait semaphore which may suspend the calling thread
399 	 * to wait for some number of completions when calling submit
400 	 */
401 	struct k_sem *submit_sem;
402 
403 	uint32_t submit_count;
404 #endif
405 
406 #ifdef CONFIG_RTIO_CONSUME_SEM
407 	/* A wait semaphore which may suspend the calling thread
408 	 * to wait for some number of completions while consuming
409 	 * them from the completion queue
410 	 */
411 	struct k_sem *consume_sem;
412 #endif
413 
414 	/* Total number of completions */
415 	atomic_t cq_count;
416 
417 	/* Number of completions that were unable to be submitted with results
418 	 * due to the completion queue being full
419 	 */
420 	atomic_t xcqcnt;
421 
422 	/* Submission queue object pool with free list */
423 	struct rtio_sqe_pool *sqe_pool;
424 
425 	/* Complete queue object pool with free list */
426 	struct rtio_cqe_pool *cqe_pool;
427 
428 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
429 	/* Mem block pool */
430 	struct sys_mem_blocks *block_pool;
431 #endif
432 
433 	/* Submission queue */
434 	struct mpsc sq;
435 
436 	/* Completion queue */
437 	struct mpsc cq;
438 };
439 
440 /** The memory partition associated with all RTIO context information */
441 extern struct k_mem_partition rtio_partition;
442 
443 /**
444  * @brief Get the mempool block size of the RTIO context
445  *
446  * @param[in] r The RTIO context
447  * @return The size of each block in the context's mempool
448  * @return 0 if the context doesn't have a mempool
449  */
450 static inline size_t rtio_mempool_block_size(const struct rtio *r)
451 {
452 #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
453 	ARG_UNUSED(r);
454 	return 0;
455 #else
456 	if (r == NULL || r->block_pool == NULL) {
457 		return 0;
458 	}
459 	return BIT(r->block_pool->info.blk_sz_shift);
460 #endif
461 }
462 
463 /**
464  * @brief Compute the mempool block index for a given pointer
465  *
466  * @param[in] r RTIO context
467  * @param[in] ptr Memory pointer in the mempool
468  * @return Index of the mempool block associated with the pointer. Or UINT16_MAX if invalid.
469  */
470 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
471 static inline uint16_t __rtio_compute_mempool_block_index(const struct rtio *r, const void *ptr)
472 {
473 	uintptr_t addr = (uintptr_t)ptr;
474 	struct sys_mem_blocks *mem_pool = r->block_pool;
475 	uint32_t block_size = rtio_mempool_block_size(r);
476 
477 	uintptr_t buff = (uintptr_t)mem_pool->buffer;
478 	uint32_t buff_size = mem_pool->info.num_blocks * block_size;
479 
480 	if (addr < buff || addr >= buff + buff_size) {
481 		return UINT16_MAX;
482 	}
483 	return (addr - buff) / block_size;
484 }
485 #endif
486 
487 /**
488  * @brief IO device submission queue entry
489  *
490  * May be cast safely to and from an rtio_sqe as they occupy the same memory provided by the pool
491  */
492 struct rtio_iodev_sqe {
493 	struct rtio_sqe sqe;
494 	struct mpsc_node q;
495 	struct rtio_iodev_sqe *next;
496 	struct rtio *r;
497 };
498 
499 /**
500  * @brief API that an RTIO IO device should implement
501  */
502 struct rtio_iodev_api {
503 	/**
504 	 * @brief Submit to the iodev an entry to work on
505 	 *
506 	 * This call should be short in duration and most likely
507 	 * either enqueue or kick off an entry with the hardware.
508 	 *
509 	 * @param iodev_sqe Submission queue entry
510 	 */
511 	void (*submit)(struct rtio_iodev_sqe *iodev_sqe);
512 };
513 
514 /**
515  * @brief An IO device with a function table for submitting requests
516  */
517 struct rtio_iodev {
518 	/* Function pointer table */
519 	const struct rtio_iodev_api *api;
520 
521 	/* Data associated with this iodev */
522 	void *data;
523 };
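/*
 * Illustrative sketch (assumed names, not a real driver): a minimal iodev whose
 * submit handler completes every request immediately. Real implementations
 * typically start the transfer here and call rtio_iodev_sqe_ok()/_err() later,
 * e.g. from an ISR.
 *
 *   static void my_iodev_submit(struct rtio_iodev_sqe *iodev_sqe)
 *   {
 *           // Inspect iodev_sqe->sqe.op and perform (or start) the work
 *           rtio_iodev_sqe_ok(iodev_sqe, 0);
 *   }
 *
 *   static const struct rtio_iodev_api my_iodev_api = {
 *           .submit = my_iodev_submit,
 *   };
 *
 *   RTIO_IODEV_DEFINE(my_iodev, &my_iodev_api, NULL);
 */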
524 
525 /** An operation that does nothing and will complete immediately */
526 #define RTIO_OP_NOP 0
527 
528 /** An operation that receives (reads) */
529 #define RTIO_OP_RX (RTIO_OP_NOP+1)
530 
531 /** An operation that transmits (writes) */
532 #define RTIO_OP_TX (RTIO_OP_RX+1)
533 
534 /** An operation that transmits tiny writes by copying the data to write */
535 #define RTIO_OP_TINY_TX (RTIO_OP_TX+1)
536 
537 /** An operation that calls a given function (callback) */
538 #define RTIO_OP_CALLBACK (RTIO_OP_TINY_TX+1)
539 
540 /** An operation that transceives (reads and writes simultaneously) */
541 #define RTIO_OP_TXRX (RTIO_OP_CALLBACK+1)
542 
543 /** An operation to recover I2C buses */
544 #define RTIO_OP_I2C_RECOVER (RTIO_OP_TXRX+1)
545 
546 /** An operation to configure I2C buses */
547 #define RTIO_OP_I2C_CONFIGURE (RTIO_OP_I2C_RECOVER+1)
548 
549 /** An operation to recover I3C buses */
550 #define RTIO_OP_I3C_RECOVER (RTIO_OP_I2C_CONFIGURE+1)
551 
552 /** An operation to configure I3C buses */
553 #define RTIO_OP_I3C_CONFIGURE (RTIO_OP_I3C_RECOVER+1)
554 
555 /** An operation to send an I3C CCC */
556 #define RTIO_OP_I3C_CCC (RTIO_OP_I3C_CONFIGURE+1)
557 
558 /**
559  * @brief Prepare a nop (no op) submission
560  */
561 static inline void rtio_sqe_prep_nop(struct rtio_sqe *sqe,
562 				const struct rtio_iodev *iodev,
563 				void *userdata)
564 {
565 	memset(sqe, 0, sizeof(struct rtio_sqe));
566 	sqe->op = RTIO_OP_NOP;
567 	sqe->iodev = iodev;
568 	sqe->userdata = userdata;
569 }
570 
571 /**
572  * @brief Prepare a read op submission
573  */
574 static inline void rtio_sqe_prep_read(struct rtio_sqe *sqe,
575 				      const struct rtio_iodev *iodev,
576 				      int8_t prio,
577 				      uint8_t *buf,
578 				      uint32_t len,
579 				      void *userdata)
580 {
581 	memset(sqe, 0, sizeof(struct rtio_sqe));
582 	sqe->op = RTIO_OP_RX;
583 	sqe->prio = prio;
584 	sqe->iodev = iodev;
585 	sqe->rx.buf_len = len;
586 	sqe->rx.buf = buf;
587 	sqe->userdata = userdata;
588 }
589 
590 /**
591  * @brief Prepare a read op submission with context's mempool
592  *
593  * @see rtio_sqe_prep_read()
594  */
595 static inline void rtio_sqe_prep_read_with_pool(struct rtio_sqe *sqe,
596 						const struct rtio_iodev *iodev, int8_t prio,
597 						void *userdata)
598 {
599 	rtio_sqe_prep_read(sqe, iodev, prio, NULL, 0, userdata);
600 	sqe->flags = RTIO_SQE_MEMPOOL_BUFFER;
601 }
602 
603 static inline void rtio_sqe_prep_read_multishot(struct rtio_sqe *sqe,
604 						const struct rtio_iodev *iodev, int8_t prio,
605 						void *userdata)
606 {
607 	rtio_sqe_prep_read_with_pool(sqe, iodev, prio, userdata);
608 	sqe->flags |= RTIO_SQE_MULTISHOT;
609 }
610 
611 /**
612  * @brief Prepare a write op submission
613  */
614 static inline void rtio_sqe_prep_write(struct rtio_sqe *sqe,
615 				       const struct rtio_iodev *iodev,
616 				       int8_t prio,
617 				       const uint8_t *buf,
618 				       uint32_t len,
619 				       void *userdata)
620 {
621 	memset(sqe, 0, sizeof(struct rtio_sqe));
622 	sqe->op = RTIO_OP_TX;
623 	sqe->prio = prio;
624 	sqe->iodev = iodev;
625 	sqe->tx.buf_len = len;
626 	sqe->tx.buf = buf;
627 	sqe->userdata = userdata;
628 }
629 
630 /**
631  * @brief Prepare a tiny write op submission
632  *
633  * Unlike the normal write operation, where the source buffer must outlive the call,
634  * the tiny write data is copied into the sqe itself. It must be tiny enough to fit
635  * within the fixed-size buffer of an rtio_sqe.
636  *
637  * This is useful in many scenarios with register-based devices where a write of the
638  * register address to subsequently read must be done.
639  */
640 static inline void rtio_sqe_prep_tiny_write(struct rtio_sqe *sqe,
641 					    const struct rtio_iodev *iodev,
642 					    int8_t prio,
643 					    const uint8_t *tiny_write_data,
644 					    uint8_t tiny_write_len,
645 					    void *userdata)
646 {
647 	__ASSERT_NO_MSG(tiny_write_len <= sizeof(sqe->tiny_tx.buf));
648 
649 	memset(sqe, 0, sizeof(struct rtio_sqe));
650 	sqe->op = RTIO_OP_TINY_TX;
651 	sqe->prio = prio;
652 	sqe->iodev = iodev;
653 	sqe->tiny_tx.buf_len = tiny_write_len;
654 	memcpy(sqe->tiny_tx.buf, tiny_write_data, tiny_write_len);
655 	sqe->userdata = userdata;
656 }
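/*
 * Illustrative sketch (hypothetical register address, iodev and context): the
 * register address is copied into the SQE itself, so it does not need to
 * outlive the call, and the transactional read returns the register contents
 * in a single completion.
 *
 *   const uint8_t reg_addr = 0x0F;
 *   uint8_t reg_val;
 *   struct rtio_sqe *wr = rtio_sqe_acquire(&my_rtio);
 *   struct rtio_sqe *rd = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_tiny_write(wr, &my_iodev, RTIO_PRIO_NORM, &reg_addr, 1, NULL);
 *   wr->flags |= RTIO_SQE_TRANSACTION;
 *   rtio_sqe_prep_read(rd, &my_iodev, RTIO_PRIO_NORM, &reg_val, 1, NULL);
 *   rtio_submit(&my_rtio, 1);
 */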
657 
658 /**
659  * @brief Prepare a callback op submission
660  *
661  * A somewhat special operation in that it may only be done in kernel mode.
662  *
663  * Used where general purpose logic is required in a queue of I/O operations to perform
664  * transforms or other logic.
665  */
666 static inline void rtio_sqe_prep_callback(struct rtio_sqe *sqe,
667 					  rtio_callback_t callback,
668 					  void *arg0,
669 					  void *userdata)
670 {
671 	memset(sqe, 0, sizeof(struct rtio_sqe));
672 	sqe->op = RTIO_OP_CALLBACK;
673 	sqe->prio = 0;
674 	sqe->iodev = NULL;
675 	sqe->callback.callback = callback;
676 	sqe->callback.arg0 = arg0;
677 	sqe->userdata = userdata;
678 }
679 
680 /**
681  * @brief Prepare a callback op submission that does not create a CQE
682  *
683  * Similar to @ref rtio_sqe_prep_callback, but the @ref RTIO_SQE_NO_RESPONSE
684  * flag is set on the SQE to prevent the generation of a CQE upon completion.
685  *
686  * This can be useful when the callback is the last operation in a sequence
687  * whose job is to clean up all the previous CQEs. Without @ref RTIO_SQE_NO_RESPONSE
688  * the completion itself will result in a CQE that cannot be consumed in the callback.
689  */
690 static inline void rtio_sqe_prep_callback_no_cqe(struct rtio_sqe *sqe,
691 						 rtio_callback_t callback,
692 						 void *arg0,
693 						 void *userdata)
694 {
695 	rtio_sqe_prep_callback(sqe, callback, arg0, userdata);
696 	sqe->flags |= RTIO_SQE_NO_RESPONSE;
697 }
698 
699 /**
700  * @brief Prepare a transceive op submission
701  */
702 static inline void rtio_sqe_prep_transceive(struct rtio_sqe *sqe,
703 					    const struct rtio_iodev *iodev,
704 					    int8_t prio,
705 					    const uint8_t *tx_buf,
706 					    uint8_t *rx_buf,
707 					    uint32_t buf_len,
708 					    void *userdata)
709 {
710 	memset(sqe, 0, sizeof(struct rtio_sqe));
711 	sqe->op = RTIO_OP_TXRX;
712 	sqe->prio = prio;
713 	sqe->iodev = iodev;
714 	sqe->txrx.buf_len = buf_len;
715 	sqe->txrx.tx_buf = tx_buf;
716 	sqe->txrx.rx_buf = rx_buf;
717 	sqe->userdata = userdata;
718 }
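/*
 * Illustrative sketch (hypothetical full-duplex iodev, e.g. a SPI device): both
 * buffers share the same length and must outlive the operation.
 *
 *   uint8_t tx_buf[8] = { 0x01, 0x02 };
 *   uint8_t rx_buf[8];
 *   struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_transceive(sqe, &spi_iodev, RTIO_PRIO_NORM,
 *                            tx_buf, rx_buf, sizeof(tx_buf), NULL);
 *   rtio_submit(&my_rtio, 1);
 */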
719 
720 static inline struct rtio_iodev_sqe *rtio_sqe_pool_alloc(struct rtio_sqe_pool *pool)
721 {
722 	struct mpsc_node *node = mpsc_pop(&pool->free_q);
723 
724 	if (node == NULL) {
725 		return NULL;
726 	}
727 
728 	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
729 
730 	pool->pool_free--;
731 
732 	return iodev_sqe;
733 }
734 
735 static inline void rtio_sqe_pool_free(struct rtio_sqe_pool *pool, struct rtio_iodev_sqe *iodev_sqe)
736 {
737 	mpsc_push(&pool->free_q, &iodev_sqe->q);
738 
739 	pool->pool_free++;
740 }
741 
742 static inline struct rtio_cqe *rtio_cqe_pool_alloc(struct rtio_cqe_pool *pool)
743 {
744 	struct mpsc_node *node = mpsc_pop(&pool->free_q);
745 
746 	if (node == NULL) {
747 		return NULL;
748 	}
749 
750 	struct rtio_cqe *cqe = CONTAINER_OF(node, struct rtio_cqe, q);
751 
752 	memset(cqe, 0, sizeof(struct rtio_cqe));
753 
754 	pool->pool_free--;
755 
756 	return cqe;
757 }
758 
759 static inline void rtio_cqe_pool_free(struct rtio_cqe_pool *pool, struct rtio_cqe *cqe)
760 {
761 	mpsc_push(&pool->free_q, &cqe->q);
762 
763 	pool->pool_free++;
764 }
765 
766 static inline int rtio_block_pool_alloc(struct rtio *r, size_t min_sz,
767 					  size_t max_sz, uint8_t **buf, uint32_t *buf_len)
768 {
769 #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
770 	ARG_UNUSED(r);
771 	ARG_UNUSED(min_sz);
772 	ARG_UNUSED(max_sz);
773 	ARG_UNUSED(buf);
774 	ARG_UNUSED(buf_len);
775 	return -ENOTSUP;
776 #else
777 	const uint32_t block_size = rtio_mempool_block_size(r);
778 	uint32_t bytes = max_sz;
779 
780 	/* Not every context has a block pool and the block size may return 0 in
781 	 * that case
782 	 */
783 	if (block_size == 0) {
784 		return -ENOMEM;
785 	}
786 
787 	do {
788 		size_t num_blks = DIV_ROUND_UP(bytes, block_size);
789 		int rc = sys_mem_blocks_alloc_contiguous(r->block_pool, num_blks, (void **)buf);
790 
791 		if (rc == 0) {
792 			*buf_len = num_blks * block_size;
793 			return 0;
794 		}
795 
796 		if (bytes <= block_size) {
797 			break;
798 		}
799 
800 		bytes -= block_size;
801 	} while (bytes >= min_sz);
802 
803 	return -ENOMEM;
804 #endif
805 }
806 
807 static inline void rtio_block_pool_free(struct rtio *r, void *buf, uint32_t buf_len)
808 {
809 #ifndef CONFIG_RTIO_SYS_MEM_BLOCKS
810 	ARG_UNUSED(r);
811 	ARG_UNUSED(buf);
812 	ARG_UNUSED(buf_len);
813 #else
814 	size_t num_blks = buf_len >> r->block_pool->info.blk_sz_shift;
815 
816 	sys_mem_blocks_free_contiguous(r->block_pool, buf, num_blks);
817 #endif
818 }
819 
820 /* Do not try and reformat the macros */
821 /* clang-format off */
822 
823 /**
824  * @brief Statically define and initialize an RTIO IODev
825  *
826  * @param name Name of the iodev
827  * @param iodev_api Pointer to struct rtio_iodev_api
828  * @param iodev_data Data pointer
829  */
830 #define RTIO_IODEV_DEFINE(name, iodev_api, iodev_data)		\
831 	STRUCT_SECTION_ITERABLE(rtio_iodev, name) = {		\
832 		.api = (iodev_api),				\
833 		.data = (iodev_data),				\
834 	}
835 
836 #define Z_RTIO_SQE_POOL_DEFINE(name, sz)			\
837 	static struct rtio_iodev_sqe CONCAT(_sqe_pool_, name)[sz];	\
838 	STRUCT_SECTION_ITERABLE(rtio_sqe_pool, name) = {	\
839 		.free_q = MPSC_INIT((name.free_q)),	\
840 		.pool_size = sz,				\
841 		.pool_free = sz,				\
842 		.pool = CONCAT(_sqe_pool_, name),		\
843 	}
844 
845 
846 #define Z_RTIO_CQE_POOL_DEFINE(name, sz)			\
847 	static struct rtio_cqe CONCAT(_cqe_pool_, name)[sz];	\
848 	STRUCT_SECTION_ITERABLE(rtio_cqe_pool, name) = {	\
849 		.free_q = MPSC_INIT((name.free_q)),	\
850 		.pool_size = sz,				\
851 		.pool_free = sz,				\
852 		.pool = CONCAT(_cqe_pool_, name),		\
853 	}
854 
855 /**
856  * @brief Allocate to bss if available
857  *
858  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition bss. Maps to:
859  *   K_APP_BMEM(rtio_partition) static
860  *
861  * If CONFIG_USERSPACE is disabled, allocate as plain static:
862  *   static
863  */
864 #define RTIO_BMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_BMEM(rtio_partition) static), (static))
865 
866 /**
867  * @brief Allocate as initialized memory if available
868  *
869  * If CONFIG_USERSPACE is selected, allocate to the rtio_partition init. Maps to:
870  *   K_APP_DMEM(rtio_partition) static
871  *
872  * If CONFIG_USERSPACE is disabled, allocate as plain static:
873  *   static
874  */
875 #define RTIO_DMEM COND_CODE_1(CONFIG_USERSPACE, (K_APP_DMEM(rtio_partition) static), (static))
876 
877 #define Z_RTIO_BLOCK_POOL_DEFINE(name, blk_sz, blk_cnt, blk_align)                                 \
878 	RTIO_BMEM uint8_t __aligned(WB_UP(blk_align))                                              \
879 	CONCAT(_block_pool_, name)[blk_cnt*WB_UP(blk_sz)];                                         \
880 	_SYS_MEM_BLOCKS_DEFINE_WITH_EXT_BUF(name, WB_UP(blk_sz), blk_cnt,                          \
881 					    CONCAT(_block_pool_, name),	RTIO_DMEM)
882 
883 #define Z_RTIO_DEFINE(name, _sqe_pool, _cqe_pool, _block_pool)                                     \
884 	IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM,                                                         \
885 		   (static K_SEM_DEFINE(CONCAT(_submit_sem_, name), 0, K_SEM_MAX_LIMIT)))          \
886 	IF_ENABLED(CONFIG_RTIO_CONSUME_SEM,                                                        \
887 		   (static K_SEM_DEFINE(CONCAT(_consume_sem_, name), 0, K_SEM_MAX_LIMIT)))         \
888 	STRUCT_SECTION_ITERABLE(rtio, name) = {                                                    \
889 		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_sem = &CONCAT(_submit_sem_, name),))   \
890 		IF_ENABLED(CONFIG_RTIO_SUBMIT_SEM, (.submit_count = 0,))                           \
891 		IF_ENABLED(CONFIG_RTIO_CONSUME_SEM, (.consume_sem = &CONCAT(_consume_sem_, name),))\
892 		.cq_count = ATOMIC_INIT(0),                                                        \
893 		.xcqcnt = ATOMIC_INIT(0),                                                          \
894 		.sqe_pool = _sqe_pool,                                                             \
895 		.cqe_pool = _cqe_pool,                                                             \
896 		IF_ENABLED(CONFIG_RTIO_SYS_MEM_BLOCKS, (.block_pool = _block_pool,))               \
897 		.sq = MPSC_INIT((name.sq)),                                                        \
898 		.cq = MPSC_INIT((name.cq)),                                                        \
899 	}
900 
901 /**
902  * @brief Statically define and initialize an RTIO context
903  *
904  * @param name Name of the RTIO
905  * @param sq_sz Size of the submission queue entry pool
906  * @param cq_sz Size of the completion queue entry pool
907  */
908 #define RTIO_DEFINE(name, sq_sz, cq_sz)						\
909 	Z_RTIO_SQE_POOL_DEFINE(CONCAT(name, _sqe_pool), sq_sz);			\
910 	Z_RTIO_CQE_POOL_DEFINE(CONCAT(name, _cqe_pool), cq_sz);			\
911 	Z_RTIO_DEFINE(name, &CONCAT(name, _sqe_pool),				\
912 		      &CONCAT(name, _cqe_pool), NULL)
913 
915 
916 /**
917  * @brief Statically define and initialize an RTIO context
918  *
919  * @param name Name of the RTIO
920  * @param sq_sz Size of the submission queue entry pool
921  * @param cq_sz Size of the completion queue entry pool
922  * @param num_blks Number of blocks in the memory pool
923  * @param blk_size The number of bytes in each block
924  * @param balign The block alignment
925  */
926 #define RTIO_DEFINE_WITH_MEMPOOL(name, sq_sz, cq_sz, num_blks, blk_size, balign) \
927 	Z_RTIO_SQE_POOL_DEFINE(name##_sqe_pool, sq_sz);		\
928 	Z_RTIO_CQE_POOL_DEFINE(name##_cqe_pool, cq_sz);			\
929 	Z_RTIO_BLOCK_POOL_DEFINE(name##_block_pool, blk_size, num_blks, balign); \
930 	Z_RTIO_DEFINE(name, &name##_sqe_pool, &name##_cqe_pool, &name##_block_pool)
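/*
 * Illustrative sketch (assumed sizes and a hypothetical iodev): a context with a
 * 16-block pool of 64-byte blocks, a pool-backed read, and retrieval and release
 * of the buffer the completion refers to.
 *
 *   RTIO_DEFINE_WITH_MEMPOOL(my_rtio, 4, 4, 16, 64, 4);
 *
 *   struct rtio_sqe *sqe = rtio_sqe_acquire(&my_rtio);
 *
 *   rtio_sqe_prep_read_with_pool(sqe, &my_iodev, RTIO_PRIO_NORM, NULL);
 *   rtio_submit(&my_rtio, 1);
 *
 *   struct rtio_cqe *cqe = rtio_cqe_consume(&my_rtio);
 *   uint8_t *buf;
 *   uint32_t buf_len;
 *
 *   if (cqe != NULL) {
 *           if (rtio_cqe_get_mempool_buffer(&my_rtio, cqe, &buf, &buf_len) == 0) {
 *                   // process buf[0..buf_len), then hand it back to the pool
 *                   rtio_release_buffer(&my_rtio, buf, buf_len);
 *           }
 *           rtio_cqe_release(&my_rtio, cqe);
 *   }
 */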
931 
932 /* clang-format on */
933 
934 /**
935  * @brief Count of acquirable submission queue events
936  *
937  * @param r RTIO context
938  *
939  * @return Count of acquirable submission queue events
940  */
941 static inline uint32_t rtio_sqe_acquirable(struct rtio *r)
942 {
943 	return r->sqe_pool->pool_free;
944 }
945 
946 /**
947  * @brief Get the next sqe in the transaction
948  *
949  * @param iodev_sqe Submission queue entry
950  *
951  * @retval NULL if current sqe is last in transaction
952  * @retval struct rtio_sqe * if available
953  */
954 static inline struct rtio_iodev_sqe *rtio_txn_next(const struct rtio_iodev_sqe *iodev_sqe)
955 {
956 	if (iodev_sqe->sqe.flags & RTIO_SQE_TRANSACTION) {
957 		return iodev_sqe->next;
958 	} else {
959 		return NULL;
960 	}
961 }
962 
963 
964 /**
965  * @brief Get the next sqe in the chain
966  *
967  * @param iodev_sqe Submission queue entry
968  *
969  * @retval NULL if current sqe is last in chain
970  * @retval struct rtio_sqe * if available
971  */
972 static inline struct rtio_iodev_sqe *rtio_chain_next(const struct rtio_iodev_sqe *iodev_sqe)
973 {
974 	if (iodev_sqe->sqe.flags & RTIO_SQE_CHAINED) {
975 		return iodev_sqe->next;
976 	} else {
977 		return NULL;
978 	}
979 }
980 
981 /**
982  * @brief Get the next sqe in the chain or transaction
983  *
984  * @param iodev_sqe Submission queue entry
985  *
986  * @retval NULL if current sqe is last in chain
987  * @retval struct rtio_iodev_sqe * if available
988  */
989 static inline struct rtio_iodev_sqe *rtio_iodev_sqe_next(const struct rtio_iodev_sqe *iodev_sqe)
990 {
991 	return iodev_sqe->next;
992 }
993 
994 /**
995  * @brief Acquire a single submission queue event if available
996  *
997  * @param r RTIO context
998  *
999  * @retval sqe A valid submission queue event acquired from the submission queue
1000  * @retval NULL No submission queue event available
1001  */
1002 static inline struct rtio_sqe *rtio_sqe_acquire(struct rtio *r)
1003 {
1004 	struct rtio_iodev_sqe *iodev_sqe = rtio_sqe_pool_alloc(r->sqe_pool);
1005 
1006 	if (iodev_sqe == NULL) {
1007 		return NULL;
1008 	}
1009 
1010 	mpsc_push(&r->sq, &iodev_sqe->q);
1011 
1012 	return &iodev_sqe->sqe;
1013 }
1014 
1015 /**
1016  * @brief Drop all previously acquired sqe
1017  *
1018  * @param r RTIO context
1019  */
1020 static inline void rtio_sqe_drop_all(struct rtio *r)
1021 {
1022 	struct rtio_iodev_sqe *iodev_sqe;
1023 	struct mpsc_node *node = mpsc_pop(&r->sq);
1024 
1025 	while (node != NULL) {
1026 		iodev_sqe = CONTAINER_OF(node, struct rtio_iodev_sqe, q);
1027 		rtio_sqe_pool_free(r->sqe_pool, iodev_sqe);
1028 		node = mpsc_pop(&r->sq);
1029 	}
1030 }
1031 
1032 /**
1033  * @brief Acquire a completion queue event if available
1034  */
1035 static inline struct rtio_cqe *rtio_cqe_acquire(struct rtio *r)
1036 {
1037 	struct rtio_cqe *cqe = rtio_cqe_pool_alloc(r->cqe_pool);
1038 
1039 	if (cqe == NULL) {
1040 		return NULL;
1041 	}
1042 
1043 	memset(cqe, 0, sizeof(struct rtio_cqe));
1044 
1045 	return cqe;
1046 }
1047 
1048 /**
1049  * @brief Produce a completion queue event
1050  */
1051 static inline void rtio_cqe_produce(struct rtio *r, struct rtio_cqe *cqe)
1052 {
1053 	mpsc_push(&r->cq, &cqe->q);
1054 }
1055 
1056 /**
1057  * @brief Consume a single completion queue event if available
1058  *
1059  * If a completion queue event is returned rtio_cqe_release(r, cqe) must be called
1060  * at some point to release the cqe spot for the cqe producer.
1061  *
1062  * @param r RTIO context
1063  *
1064  * @retval cqe A valid completion queue event consumed from the completion queue
1065  * @retval NULL No completion queue event available
1066  */
1067 static inline struct rtio_cqe *rtio_cqe_consume(struct rtio *r)
1068 {
1069 	struct mpsc_node *node;
1070 	struct rtio_cqe *cqe = NULL;
1071 
1072 #ifdef CONFIG_RTIO_CONSUME_SEM
1073 	if (k_sem_take(r->consume_sem, K_NO_WAIT) != 0) {
1074 		return NULL;
1075 	}
1076 #endif
1077 
1078 	node = mpsc_pop(&r->cq);
1079 	if (node == NULL) {
1080 		return NULL;
1081 	}
1082 	cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1083 
1084 	return cqe;
1085 }
1086 
1087 /**
1088  * @brief Wait for and consume a single completion queue event
1089  *
1090  * If a completion queue event is returned rtio_cqe_release(r, cqe) must be called
1091  * at some point to release the cqe spot for the cqe producer.
1092  *
1093  * @param r RTIO context
1094  *
1095  * @retval cqe A valid completion queue event consumed from the completion queue
1096  */
1097 static inline struct rtio_cqe *rtio_cqe_consume_block(struct rtio *r)
1098 {
1099 	struct mpsc_node *node;
1100 	struct rtio_cqe *cqe;
1101 
1102 #ifdef CONFIG_RTIO_CONSUME_SEM
1103 	k_sem_take(r->consume_sem, K_FOREVER);
1104 #endif
1105 	node = mpsc_pop(&r->cq);
1106 	while (node == NULL) {
1107 		Z_SPIN_DELAY(1);
1108 		node = mpsc_pop(&r->cq);
1109 	}
1110 	cqe = CONTAINER_OF(node, struct rtio_cqe, q);
1111 
1112 	return cqe;
1113 }
1114 
1115 /**
1116  * @brief Release consumed completion queue event
1117  *
1118  * @param r RTIO context
1119  * @param cqe Completion queue entry
1120  */
1121 static inline void rtio_cqe_release(struct rtio *r, struct rtio_cqe *cqe)
1122 {
1123 	rtio_cqe_pool_free(r->cqe_pool, cqe);
1124 }
1125 
1126 /**
1127  * @brief Compute the CQE flags from the rtio_iodev_sqe entry
1128  *
1129  * @param iodev_sqe The SQE entry in question.
1130  * @return The value that should be set for the CQE's flags field.
1131  */
1132 static inline uint32_t rtio_cqe_compute_flags(struct rtio_iodev_sqe *iodev_sqe)
1133 {
1134 	uint32_t flags = 0;
1135 
1136 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1137 	if (iodev_sqe->sqe.op == RTIO_OP_RX && iodev_sqe->sqe.flags & RTIO_SQE_MEMPOOL_BUFFER) {
1138 		struct rtio *r = iodev_sqe->r;
1139 		struct sys_mem_blocks *mem_pool = r->block_pool;
1140 		int blk_index = (iodev_sqe->sqe.rx.buf - mem_pool->buffer) >>
1141 				mem_pool->info.blk_sz_shift;
1142 		int blk_count = iodev_sqe->sqe.rx.buf_len >> mem_pool->info.blk_sz_shift;
1143 
1144 		flags = RTIO_CQE_FLAG_PREP_MEMPOOL(blk_index, blk_count);
1145 	}
1146 #else
1147 	ARG_UNUSED(iodev_sqe);
1148 #endif
1149 
1150 	return flags;
1151 }
1152 
1153 /**
1154  * @brief Retrieve the mempool buffer that was allocated for the CQE.
1155  *
1156  * If the RTIO context contains a memory pool, and the SQE was created by calling
1157  * rtio_sqe_prep_read_with_pool(), this function can be used to retrieve the memory associated with the
1158  * read. Once processing is done, it should be released by calling rtio_release_buffer().
1159  *
1160  * @param[in] r RTIO context
1161  * @param[in] cqe The CQE handling the event.
1162  * @param[out] buff Pointer to the mempool buffer
1163  * @param[out] buff_len Length of the allocated buffer
1164  * @return 0 on success
1165  * @return -EINVAL if the buffer wasn't allocated for this cqe
1166  * @return -ENOTSUP if memory blocks are disabled
1167  */
1168 __syscall int rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1169 					  uint8_t **buff, uint32_t *buff_len);
1170 
1171 static inline int z_impl_rtio_cqe_get_mempool_buffer(const struct rtio *r, struct rtio_cqe *cqe,
1172 						     uint8_t **buff, uint32_t *buff_len)
1173 {
1174 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1175 	if (RTIO_CQE_FLAG_GET(cqe->flags) == RTIO_CQE_FLAG_MEMPOOL_BUFFER) {
1176 		int blk_idx = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_IDX(cqe->flags);
1177 		int blk_count = RTIO_CQE_FLAG_MEMPOOL_GET_BLK_CNT(cqe->flags);
1178 		uint32_t blk_size = rtio_mempool_block_size(r);
1179 
1180 		*buff = r->block_pool->buffer + blk_idx * blk_size;
1181 		*buff_len = blk_count * blk_size;
1182 		__ASSERT_NO_MSG(*buff >= r->block_pool->buffer);
1183 		__ASSERT_NO_MSG(*buff <
1184 				r->block_pool->buffer + blk_size * r->block_pool->info.num_blocks);
1185 		return 0;
1186 	}
1187 	return -EINVAL;
1188 #else
1189 	ARG_UNUSED(r);
1190 	ARG_UNUSED(cqe);
1191 	ARG_UNUSED(buff);
1192 	ARG_UNUSED(buff_len);
1193 
1194 	return -ENOTSUP;
1195 #endif
1196 }
1197 
1198 void rtio_executor_submit(struct rtio *r);
1199 void rtio_executor_ok(struct rtio_iodev_sqe *iodev_sqe, int result);
1200 void rtio_executor_err(struct rtio_iodev_sqe *iodev_sqe, int result);
1201 
1202 /**
1203  * @brief Inform the executor of a submission completion with success
1204  *
1205  * This may start the next asynchronous request if one is available.
1206  *
1207  * @param iodev_sqe IODev Submission that has succeeded
1208  * @param result Result of the request
1209  */
1210 static inline void rtio_iodev_sqe_ok(struct rtio_iodev_sqe *iodev_sqe, int result)
1211 {
1212 	rtio_executor_ok(iodev_sqe, result);
1213 }
1214 
1215 /**
1216  * @brief Inform the executor of a submission completion with error
1217  *
1218  * This SHALL fail the remaining submissions in the chain.
1219  *
1220  * @param iodev_sqe Submission that has failed
1221  * @param result Result of the request
1222  */
1223 static inline void rtio_iodev_sqe_err(struct rtio_iodev_sqe *iodev_sqe, int result)
1224 {
1225 	rtio_executor_err(iodev_sqe, result);
1226 }
1227 
1228 /**
1229  * Submit a completion queue event with a given result and userdata
1230  *
1231  * Called by the executor to produce a completion queue event; no inherent
1232  * locking is performed and this is not safe to do from multiple callers.
1233  *
1234  * @param r RTIO context
1235  * @param result Integer result code (could be -errno)
1236  * @param userdata Userdata to pass along to completion
1237  * @param flags Flags to use for the CQE, see RTIO_CQE_FLAG_*
1238  */
1239 static inline void rtio_cqe_submit(struct rtio *r, int result, void *userdata, uint32_t flags)
1240 {
1241 	struct rtio_cqe *cqe = rtio_cqe_acquire(r);
1242 
1243 	if (cqe == NULL) {
1244 		atomic_inc(&r->xcqcnt);
1245 	} else {
1246 		cqe->result = result;
1247 		cqe->userdata = userdata;
1248 		cqe->flags = flags;
1249 		rtio_cqe_produce(r, cqe);
1250 	}
1251 
1252 	atomic_inc(&r->cq_count);
1253 #ifdef CONFIG_RTIO_SUBMIT_SEM
1254 	if (r->submit_count > 0) {
1255 		r->submit_count--;
1256 		if (r->submit_count == 0) {
1257 			k_sem_give(r->submit_sem);
1258 		}
1259 	}
1260 #endif
1261 #ifdef CONFIG_RTIO_CONSUME_SEM
1262 	k_sem_give(r->consume_sem);
1263 #endif
1264 }
1265 
1266 #define __RTIO_MEMPOOL_GET_NUM_BLKS(num_bytes, blk_size) (((num_bytes) + (blk_size)-1) / (blk_size))
1267 
1268 /**
1269  * @brief Get the buffer associated with the RX submission
1270  *
1271  * @param[in] iodev_sqe   The submission to probe
1272  * @param[in] min_buf_len The minimum number of bytes needed for the operation
1273  * @param[in] max_buf_len The maximum number of bytes needed for the operation
1274  * @param[out] buf        Where to store the pointer to the buffer
1275  * @param[out] buf_len    Where to store the size of the buffer
1276  *
1277  * @return 0 if @p buf and @p buf_len were successfully filled
1278  * @return -ENOMEM Not enough memory for @p min_buf_len
1279  */
1280 static inline int rtio_sqe_rx_buf(const struct rtio_iodev_sqe *iodev_sqe, uint32_t min_buf_len,
1281 				  uint32_t max_buf_len, uint8_t **buf, uint32_t *buf_len)
1282 {
1283 	struct rtio_sqe *sqe = (struct rtio_sqe *)&iodev_sqe->sqe;
1284 
1285 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1286 	if (sqe->op == RTIO_OP_RX && sqe->flags & RTIO_SQE_MEMPOOL_BUFFER) {
1287 		struct rtio *r = iodev_sqe->r;
1288 
1289 		if (sqe->rx.buf != NULL) {
1290 			if (sqe->rx.buf_len < min_buf_len) {
1291 				return -ENOMEM;
1292 			}
1293 			*buf = sqe->rx.buf;
1294 			*buf_len = sqe->rx.buf_len;
1295 			return 0;
1296 		}
1297 
1298 		int rc = rtio_block_pool_alloc(r, min_buf_len, max_buf_len, buf, buf_len);
1299 		if (rc == 0) {
1300 			sqe->rx.buf = *buf;
1301 			sqe->rx.buf_len = *buf_len;
1302 			return 0;
1303 		}
1304 
1305 		return -ENOMEM;
1306 	}
1307 #else
1308 	ARG_UNUSED(max_buf_len);
1309 #endif
1310 
1311 	if (sqe->rx.buf_len < min_buf_len) {
1312 		return -ENOMEM;
1313 	}
1314 
1315 	*buf = sqe->rx.buf;
1316 	*buf_len = sqe->rx.buf_len;
1317 	return 0;
1318 }
1319 
1320 /**
1321  * @brief Release memory that was allocated by the RTIO's memory pool
1322  *
1323  * If the RTIO context was created by a call to RTIO_DEFINE_WITH_MEMPOOL(), then the cqe data might
1324  * contain a buffer that's owned by the RTIO context. In those cases (if the read request was
1325  * configured via rtio_sqe_prep_read_with_pool()) the buffer must be returned back to the pool.
1326  *
1327  * Call this function when processing is complete. This function will validate that the memory
1328  * actually belongs to the RTIO context and will ignore invalid arguments.
1329  *
1330  * @param r RTIO context
1331  * @param buff Pointer to the buffer to be released.
1332  * @param buff_len Number of bytes to free (will be rounded up to nearest memory block).
1333  */
1334 __syscall void rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len);
1335 
1336 static inline void z_impl_rtio_release_buffer(struct rtio *r, void *buff, uint32_t buff_len)
1337 {
1338 #ifdef CONFIG_RTIO_SYS_MEM_BLOCKS
1339 	if (r == NULL || buff == NULL || r->block_pool == NULL || buff_len == 0) {
1340 		return;
1341 	}
1342 
1343 	rtio_block_pool_free(r, buff, buff_len);
1344 #else
1345 	ARG_UNUSED(r);
1346 	ARG_UNUSED(buff);
1347 	ARG_UNUSED(buff_len);
1348 #endif
1349 }
1350 
1351 /**
1352  * Grant access to an RTIO context to a user thread
1353  */
1354 static inline void rtio_access_grant(struct rtio *r, struct k_thread *t)
1355 {
1356 	k_object_access_grant(r, t);
1357 
1358 #ifdef CONFIG_RTIO_SUBMIT_SEM
1359 	k_object_access_grant(r->submit_sem, t);
1360 #endif
1361 
1362 #ifdef CONFIG_RTIO_CONSUME_SEM
1363 	k_object_access_grant(r->consume_sem, t);
1364 #endif
1365 }
1366 
1367 /**
1368  * @brief Attempt to cancel an SQE
1369  *
1370  * If possible (not currently executing), cancel an SQE and generate a failure with -ECANCELED
1371  * result.
1372  *
1373  * @param[in] sqe The SQE to cancel
1374  * @return 0 if the SQE was flagged for cancellation
1375  * @return <0 on error
1376  */
1377 __syscall int rtio_sqe_cancel(struct rtio_sqe *sqe);
1378 
1379 static inline int z_impl_rtio_sqe_cancel(struct rtio_sqe *sqe)
1380 {
1381 	struct rtio_iodev_sqe *iodev_sqe = CONTAINER_OF(sqe, struct rtio_iodev_sqe, sqe);
1382 
1383 	do {
1384 		iodev_sqe->sqe.flags |= RTIO_SQE_CANCELED;
1385 		iodev_sqe = rtio_iodev_sqe_next(iodev_sqe);
1386 	} while (iodev_sqe != NULL);
1387 
1388 	return 0;
1389 }
1390 
1391 /**
1392  * @brief Copy an array of SQEs into the queue and get resulting handles back
1393  *
1394  * Copies one or more SQEs into the RTIO context and optionally returns their generated SQE handles.
1395  * Handles can be used to cancel events via the rtio_sqe_cancel() call.
1396  *
1397  * @param[in]  r RTIO context
1398  * @param[in]  sqes Pointer to an array of SQEs
1399  * @param[out] handle Optional pointer to @ref rtio_sqe pointer to store the handle of the
1400  *             first generated SQE. Use NULL to ignore.
1401  * @param[in]  sqe_count Count of sqes in array
1402  *
1403  * @retval 0 success
1404  * @retval -ENOMEM not enough room in the queue
1405  */
1406 __syscall int rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1407 					   struct rtio_sqe **handle, size_t sqe_count);
1408 
1409 static inline int z_impl_rtio_sqe_copy_in_get_handles(struct rtio *r, const struct rtio_sqe *sqes,
1410 						      struct rtio_sqe **handle,
1411 						      size_t sqe_count)
1412 {
1413 	struct rtio_sqe *sqe;
1414 	uint32_t acquirable = rtio_sqe_acquirable(r);
1415 
1416 	if (acquirable < sqe_count) {
1417 		return -ENOMEM;
1418 	}
1419 
1420 	for (unsigned long i = 0; i < sqe_count; i++) {
1421 		sqe = rtio_sqe_acquire(r);
1422 		__ASSERT_NO_MSG(sqe != NULL);
1423 		if (handle != NULL && i == 0) {
1424 			*handle = sqe;
1425 		}
1426 		*sqe = sqes[i];
1427 	}
1428 
1429 	return 0;
1430 }
1431 
1432 /**
1433  * @brief Copy an array of SQEs into the queue
1434  *
1435  * Useful if a batch of submissions is stored in ROM or
1436  * RTIO is used from user mode where a copy must be made.
1437  *
1438  * Partial copying is not done as chained SQEs need to be submitted
1439  * as a whole set.
1440  *
1441  * @param r RTIO context
1442  * @param sqes Pointer to an array of SQEs
1443  * @param sqe_count Count of sqes in array
1444  *
1445  * @retval 0 success
1446  * @retval -ENOMEM not enough room in the queue
1447  */
1448 static inline int rtio_sqe_copy_in(struct rtio *r, const struct rtio_sqe *sqes, size_t sqe_count)
1449 {
1450 	return rtio_sqe_copy_in_get_handles(r, sqes, NULL, sqe_count);
1451 }
1452 
1453 /**
1454  * @brief Copy an array of CQEs from the queue
1455  *
1456  * Copies completion queue events from the RTIO context's completion
1457  * queue, waiting for the given time period to gather the number
1458  * of completions requested.
1459  *
1460  * @param r RTIO context
1461  * @param cqes Pointer to an array of CQEs
1462  * @param cqe_count Count of cqes in array
1463  * @param timeout Timeout to wait for each completion event. Total wait time is
1464  *                potentially timeout*cqe_count at maximum.
1465  *
1466  * @retval copy_count Count of copied CQEs (0 to cqe_count)
1467  */
1468 __syscall int rtio_cqe_copy_out(struct rtio *r,
1469 				struct rtio_cqe *cqes,
1470 				size_t cqe_count,
1471 				k_timeout_t timeout);
1472 static inline int z_impl_rtio_cqe_copy_out(struct rtio *r,
1473 					   struct rtio_cqe *cqes,
1474 					   size_t cqe_count,
1475 					   k_timeout_t timeout)
1476 {
1477 	size_t copied = 0;
1478 	struct rtio_cqe *cqe;
1479 	k_timepoint_t end = sys_timepoint_calc(timeout);
1480 
1481 	do {
1482 		cqe = K_TIMEOUT_EQ(timeout, K_FOREVER) ? rtio_cqe_consume_block(r)
1483 						       : rtio_cqe_consume(r);
1484 		if (cqe == NULL) {
1485 			Z_SPIN_DELAY(25);
1486 			continue;
1487 		}
1488 		cqes[copied++] = *cqe;
1489 		rtio_cqe_release(r, cqe);
1490 	} while (copied < cqe_count && !sys_timepoint_expired(end));
1491 
1492 	return copied;
1493 }
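/*
 * Illustrative sketch: draining up to 8 completions into a local array, waiting
 * at most 100 ms per completion.
 *
 *   struct rtio_cqe cqes[8];
 *   int n = rtio_cqe_copy_out(&my_rtio, cqes, ARRAY_SIZE(cqes), K_MSEC(100));
 *
 *   for (int i = 0; i < n; i++) {
 *           // cqes[i].result and cqes[i].userdata identify each completion
 *   }
 */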
1494 
1495 /**
1496  * @brief Submit I/O requests to the underlying executor
1497  *
1498  * Submits the queue of submission queue events to the executor.
1499  * The executor will do the work of managing tasks representing each
1500  * submission chain, freeing submission queue events when done, and
1501  * producing completion queue events as submissions are completed.
1502  *
1503  * @param r RTIO context
1504  * @param wait_count Number of submissions to wait for completion of.
1505  *
1506  * @retval 0 On success
1507  */
1508 __syscall int rtio_submit(struct rtio *r, uint32_t wait_count);
1509 
1510 static inline int z_impl_rtio_submit(struct rtio *r, uint32_t wait_count)
1511 {
1512 	int res = 0;
1513 
1514 #ifdef CONFIG_RTIO_SUBMIT_SEM
1515 	/* TODO undefined behavior if another thread calls submit of course
1516 	 */
1517 	if (wait_count > 0) {
1518 		__ASSERT(!k_is_in_isr(),
1519 			 "expected rtio submit with wait count to be called from a thread");
1520 
1521 		k_sem_reset(r->submit_sem);
1522 		r->submit_count = wait_count;
1523 	}
1524 #else
1525 	uintptr_t cq_count = (uintptr_t)atomic_get(&r->cq_count) + wait_count;
1526 #endif
1527 
1528 	/* Submit the queue to the executor which consumes submissions
1529 	 * and produces completions through ISR chains or other means.
1530 	 */
1531 	rtio_executor_submit(r);
1532 
1533 
1534 	/* TODO could be nicer if we could suspend the thread and not
1535 	 * wake up on each completion here.
1536 	 */
1537 #ifdef CONFIG_RTIO_SUBMIT_SEM
1538 
1539 	if (wait_count > 0) {
1540 		res = k_sem_take(r->submit_sem, K_FOREVER);
1541 		__ASSERT(res == 0,
1542 			 "semaphore was reset or timed out while waiting on completions!");
1543 	}
1544 #else
1545 	while ((uintptr_t)atomic_get(&r->cq_count) < cq_count) {
1546 		Z_SPIN_DELAY(10);
1547 		k_yield();
1548 	}
1549 #endif
1550 
1551 	return res;
1552 }
1553 
1554 /**
1555  * @}
1556  */
1557 
1558 #ifdef __cplusplus
1559 }
1560 #endif
1561 
1562 #include <zephyr/syscalls/rtio.h>
1563 
1564 #endif /* ZEPHYR_INCLUDE_RTIO_RTIO_H_ */
1565