/*
 * Copyright (c) 2021 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/kobject.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/app_memory/mem_domain.h>
#include <zephyr/sys/util_loops.h>
#include <zephyr/sys/time_units.h>
#include <zephyr/timing/timing.h>
#include <zephyr/rtio/rtio.h>

#include "rtio_iodev_test.h"

/* Run each test multiple times to ensure it is repeatable */
#define TEST_REPEATS 4

#define MEM_BLK_COUNT 4
#define MEM_BLK_SIZE 16
#define MEM_BLK_ALIGN 4

#define SQE_POOL_SIZE 5
#define CQE_POOL_SIZE 5

/*
 * Purposefully double the block count and halve the block size. This leaves the mempool the
 * same size overall, but ensures that allocations span multiple blocks because the tests
 * assume a larger block size.
 */
RTIO_DEFINE_WITH_MEMPOOL(r_simple, SQE_POOL_SIZE, CQE_POOL_SIZE, MEM_BLK_COUNT * 2,
			 MEM_BLK_SIZE / 2, MEM_BLK_ALIGN);
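/*
 * A minimal sketch of the mempool read flow exercised by the tests below
 * (error handling omitted; iodev stands for any struct rtio_iodev *, e.g.
 * &iodev_test_simple; see test_rtio_simple_mempool_() for the checked version):
 *
 *	struct rtio_sqe sqe;
 *	struct rtio_cqe cqe;
 *	uint8_t *buf;
 *	uint32_t buf_len;
 *
 *	rtio_sqe_prep_read_with_pool(&sqe, iodev, 0, userdata);
 *	rtio_sqe_copy_in(&r_simple, &sqe, 1);
 *	rtio_submit(&r_simple, 1);
 *	rtio_cqe_copy_out(&r_simple, &cqe, 1, K_FOREVER);
 *	rtio_cqe_get_mempool_buffer(&r_simple, &cqe, &buf, &buf_len);
 *	rtio_release_buffer(&r_simple, buf, buf_len);
 */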

RTIO_IODEV_TEST_DEFINE(iodev_test_simple);

/**
 * @brief Test the basics of the RTIO API
 *
 * Ensures that we can set up an RTIO context, enqueue a request, and receive
 * a completion event.
 */
void test_rtio_simple_(struct rtio *r)
{
	int res;
	uintptr_t userdata[2] = {0, 1};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	rtio_iodev_test_init(&iodev_test_simple);

	TC_PRINT("setting up single no-op\n");
	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, (struct rtio_iodev *)&iodev_test_simple, &userdata[0]);

	TC_PRINT("submit with wait\n");
	res = rtio_submit(r, 1);
	zassert_ok(res, "Should return ok from rtio_submit");

	cqe = rtio_cqe_consume(r);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_ok(cqe->result, "Result should be ok");
	zassert_equal_ptr(cqe->userdata, &userdata[0], "Expected userdata back");
	rtio_cqe_release(r, cqe);
}

ZTEST(rtio_api, test_rtio_simple)
{
	TC_PRINT("rtio simple\n");
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_simple_(&r_simple);
	}
}

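/**
 * @brief Test a no-response request
 *
 * Ensures that a submission flagged with RTIO_SQE_NO_RESPONSE completes
 * without generating a completion event.
 */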
ZTEST(rtio_api, test_rtio_no_response)
{
	int res;
	uintptr_t userdata[2] = {0, 1};
	struct rtio_sqe *sqe;
	struct rtio_cqe cqe;

	rtio_iodev_test_init(&iodev_test_simple);

	sqe = rtio_sqe_acquire(&r_simple);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, (struct rtio_iodev *)&iodev_test_simple, &userdata[0]);
	sqe->flags |= RTIO_SQE_NO_RESPONSE;

	res = rtio_submit(&r_simple, 0);
	zassert_ok(res, "Should return ok from rtio_submit");

	res = rtio_cqe_copy_out(&r_simple, &cqe, 1, K_MSEC(500));
	zassert_equal(0, res, "Expected no CQEs");
}

RTIO_DEFINE(r_chain, SQE_POOL_SIZE, CQE_POOL_SIZE);

RTIO_IODEV_TEST_DEFINE(iodev_test_chain0);
RTIO_IODEV_TEST_DEFINE(iodev_test_chain1);
struct rtio_iodev *iodev_test_chain[] = {&iodev_test_chain0, &iodev_test_chain1};

/**
 * @brief Test chained requests
 *
 * Ensures that we can set up an RTIO context, enqueue chained requests,
 * and receive completion events in the correct order given the chained
 * flag and multiple devices where serialization isn't guaranteed.
 */
void test_rtio_chain_(struct rtio *r)
{
	int res;
	uint32_t userdata[4] = {0, 1, 2, 3};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	uintptr_t cq_count = atomic_get(&r->cq_count);

	for (int i = 0; i < 4; i++) {
		sqe = rtio_sqe_acquire(r);
		zassert_not_null(sqe, "Expected a valid sqe");
		rtio_sqe_prep_nop(sqe, iodev_test_chain[i % 2], &userdata[i]);
		sqe->flags |= RTIO_SQE_CHAINED;
		TC_PRINT("produce %d, sqe %p, userdata %d\n", i, sqe, userdata[i]);
	}

	/* Clear the flags on the last sqe to terminate the chain */
	sqe->flags = 0;

	TC_PRINT("submitting\n");

	res = rtio_submit(r, 4);
	TC_PRINT("checking cq\n");
	zassert_ok(res, "Should return ok from rtio_submit");
	zassert_equal(atomic_get(&r->cq_count) - cq_count, 4, "Should have 4 pending completions");

	for (int i = 0; i < 4; i++) {
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		TC_PRINT("consume %d, cqe %p, userdata %d\n", i, cqe, *(uint32_t *)cqe->userdata);
		zassert_ok(cqe->result, "Result should be ok");

		zassert_equal_ptr(cqe->userdata, &userdata[i], "Expected in order completions");
		rtio_cqe_release(r, cqe);
	}
}

ZTEST(rtio_api, test_rtio_chain)
{
	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_chain[i]);
	}

	TC_PRINT("rtio chain simple\n");
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_chain_(&r_chain);
	}
}

RTIO_DEFINE(r_multi_chain, SQE_POOL_SIZE, CQE_POOL_SIZE);

RTIO_IODEV_TEST_DEFINE(iodev_test_multi0);
RTIO_IODEV_TEST_DEFINE(iodev_test_multi1);
struct rtio_iodev *iodev_test_multi[] = {&iodev_test_multi0, &iodev_test_multi1};

/**
 * @brief Test multiple asynchronous chains against one iodev
 */
void test_rtio_multiple_chains_(struct rtio *r)
{
	int res;
	uintptr_t userdata[4] = {0, 1, 2, 3};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe = NULL;

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			sqe = rtio_sqe_acquire(r);
			zassert_not_null(sqe, "Expected a valid sqe");
			rtio_sqe_prep_nop(sqe, iodev_test_multi[i],
					  (void *)userdata[i * 2 + j]);
			if (j == 0) {
				sqe->flags |= RTIO_SQE_CHAINED;
			} else {
				sqe->flags |= 0;
			}
		}
	}

	TC_PRINT("calling submit from test case\n");
	res = rtio_submit(r, 0);
	zassert_ok(res, "Should return ok from rtio_submit");

	bool seen[4] = { 0 };

	TC_PRINT("waiting for 4 completions\n");
	for (int i = 0; i < 4; i++) {
		TC_PRINT("waiting on completion %d\n", i);

		cqe = rtio_cqe_consume(r);
		while (cqe == NULL) {
			k_sleep(K_MSEC(1));
			cqe = rtio_cqe_consume(r);
		}

		TC_PRINT("consumed cqe %p, result %d, userdata %lu\n", cqe,
			 cqe->result, (uintptr_t)cqe->userdata);

		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");
		seen[(uintptr_t)cqe->userdata] = true;
		if (seen[1]) {
			zassert_true(seen[0], "Should see 0 before 1");
		}
		if (seen[3]) {
			zassert_true(seen[2], "Should see 2 before 3");
		}
		rtio_cqe_release(r, cqe);
	}
}

ZTEST(rtio_api, test_rtio_multiple_chains)
{
	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_multi[i]);
	}

	TC_PRINT("rtio multiple chains\n");
	test_rtio_multiple_chains_(&r_multi_chain);
}

#ifdef CONFIG_USERSPACE
struct k_mem_domain rtio_domain;
#endif

RTIO_BMEM uint8_t syscall_bufs[4];

RTIO_DEFINE(r_syscall, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_IODEV_TEST_DEFINE(iodev_test_syscall);

ZTEST_USER(rtio_api, test_rtio_syscalls)
{
	int res;
	struct rtio_sqe sqe = {0};
	struct rtio_cqe cqe = {0};

	struct rtio *r = &r_syscall;

	for (int i = 0; i < 4; i++) {
		TC_PRINT("copying sqe in from stack\n");
		/* Not really legal from userspace! Ugh */
		rtio_sqe_prep_nop(&sqe, &iodev_test_syscall, &syscall_bufs[i]);
		res = rtio_sqe_copy_in(r, &sqe, 1);
		zassert_equal(res, 0, "Expected success copying sqe");
	}

	TC_PRINT("submitting\n");
	res = rtio_submit(r, 4);
	zassert_ok(res, "Should return ok from rtio_submit");

	for (int i = 0; i < 4; i++) {
		TC_PRINT("consume %d\n", i);
		res = rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER);
		zassert_equal(res, 1, "Expected success copying cqe");
		zassert_ok(cqe.result, "Result should be ok");
		zassert_equal_ptr(cqe.userdata, &syscall_bufs[i],
				  "Expected in order completions");
	}
}

RTIO_BMEM uint8_t mempool_data[MEM_BLK_SIZE];

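/**
 * @brief Test a read request backed by the context's mempool
 *
 * Ensures that a read prepared with rtio_sqe_prep_read_with_pool() completes
 * with a buffer allocated from the context's mempool, which can be retrieved
 * with rtio_cqe_get_mempool_buffer() and returned with rtio_release_buffer().
 */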
static void test_rtio_simple_mempool_(struct rtio *r, int run_count)
{
	int res;
	struct rtio_sqe sqe = {0};
	struct rtio_cqe cqe = {0};

	for (int i = 0; i < MEM_BLK_SIZE; ++i) {
		mempool_data[i] = i + run_count;
	}

	TC_PRINT("setting up single mempool read %p\n", r);
	rtio_sqe_prep_read_with_pool(&sqe, (struct rtio_iodev *)&iodev_test_simple, 0,
				     mempool_data);
	TC_PRINT("Calling rtio_sqe_copy_in()\n");
	res = rtio_sqe_copy_in(r, &sqe, 1);
	zassert_ok(res);

	TC_PRINT("submit with wait\n");
	res = rtio_submit(r, 1);
	zassert_ok(res, "Should return ok from rtio_submit");

	TC_PRINT("Calling rtio_cqe_copy_out\n");
	res = rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER);
	zassert_equal(1, res);
	TC_PRINT("cqe result %d, userdata %p\n", cqe.result, cqe.userdata);
	zassert_ok(cqe.result, "Result should be ok");
	zassert_equal_ptr(cqe.userdata, mempool_data, "Expected userdata back");

	uint8_t *buffer = NULL;
	uint32_t buffer_len = 0;

	TC_PRINT("Calling rtio_cqe_get_mempool_buffer\n");
	zassert_ok(rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len));

	zassert_not_null(buffer, "Expected an allocated mempool buffer");
	zassert_equal(buffer_len, MEM_BLK_SIZE);
	zassert_mem_equal(buffer, mempool_data, MEM_BLK_SIZE, "Data expected to be the same");
	TC_PRINT("Calling rtio_release_buffer\n");
	rtio_release_buffer(r, buffer, buffer_len);
}

ZTEST_USER(rtio_api, test_rtio_simple_mempool)
{
	for (int i = 0; i < TEST_REPEATS * 2; i++) {
		test_rtio_simple_mempool_(&r_simple, i);
	}
}

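/**
 * @brief Test cancellation of a single request
 *
 * Ensures that an SQE cancelled before submission produces no completion
 * event and that its slot is returned to the SQE pool.
 */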
static void test_rtio_simple_cancel_(struct rtio *r)
{
	struct rtio_sqe sqe[SQE_POOL_SIZE];
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	rtio_sqe_prep_nop(sqe, (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 1);
	rtio_sqe_cancel(handle);
	TC_PRINT("Submitting 1 to RTIO\n");
	rtio_submit(r, 0);

	/* Check that we don't get a CQE */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}
}

ZTEST_USER(rtio_api, test_rtio_simple_cancel)
{
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_simple_cancel_(&r_simple);
	}
}

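/**
 * @brief Test cancellation of chained requests
 *
 * Ensures that cancelling the head of a chain cancels the entire chain,
 * while cancelling an SQE in the middle of a chain still completes the
 * entries ahead of it.
 */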
static void test_rtio_chain_cancel_(struct rtio *r)
{
	struct rtio_sqe sqe[SQE_POOL_SIZE];
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	/* Prepare the chain */
	rtio_sqe_prep_nop(&sqe[0], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[1], (struct rtio_iodev *)&iodev_test_simple, NULL);
	sqe[0].flags |= RTIO_SQE_CHAINED;

	/* Copy the chain */
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 2);
	rtio_sqe_cancel(handle);
	k_msleep(20);
	rtio_submit(r, 0);

	/* Check that we don't get cancelled completion notifications */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}

	/* Try cancelling the middle sqe in a chain */
	rtio_sqe_prep_nop(&sqe[0], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[1], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[2], (struct rtio_iodev *)&iodev_test_simple, NULL);
	sqe[0].flags |= RTIO_SQE_CHAINED;
	sqe[1].flags |= RTIO_SQE_CHAINED | RTIO_SQE_CANCELED;

	/* Copy in the whole chain; only the first sqe is not cancelled */
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 3);
	rtio_submit(r, 1);

	/* Check that we get one completion and no cancellation notifications */
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that we get no more completions for the cancelled submissions */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}
}

ZTEST_USER(rtio_api, test_rtio_chain_cancel)
{
	TC_PRINT("start test\n");
	k_msleep(20);
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_chain_cancel_(&r_simple);
	}
}

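/**
 * @brief Test cancellation of a transaction
 *
 * Ensures that cancelling the first SQE of a transaction cancels the whole
 * transaction, producing no completion events, and returns the SQEs to the
 * pool.
 */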
static void test_rtio_transaction_cancel_(struct rtio *r)
{
	struct rtio_sqe sqe[SQE_POOL_SIZE];
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	/* Prepare the transaction */
	rtio_sqe_prep_nop(&sqe[0], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[1], (struct rtio_iodev *)&iodev_test_simple, NULL);
	sqe[0].flags |= RTIO_SQE_TRANSACTION;

	/* Copy the transaction */
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 2);
	rtio_sqe_cancel(handle);
	TC_PRINT("Submitting 2 to RTIO\n");
	rtio_submit(r, 0);

	/* Check that we don't get a CQE */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}
}

ZTEST_USER(rtio_api, test_rtio_transaction_cancel)
{
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_cancel_(&r_simple);
	}
}

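/**
 * @brief Test a multishot read
 *
 * Ensures that a multishot read keeps resubmitting itself, producing a
 * mempool-backed completion per iteration, until it is cancelled.
 */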
static inline void test_rtio_simple_multishot_(struct rtio *r, int idx)
{
	int res;
	struct rtio_sqe sqe;
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	for (int i = 0; i < MEM_BLK_SIZE; ++i) {
		mempool_data[i] = i + idx;
	}

	TC_PRINT("setting up single mempool read\n");
	rtio_sqe_prep_read_multishot(&sqe, (struct rtio_iodev *)&iodev_test_simple, 0,
				     mempool_data);
	TC_PRINT("Calling rtio_sqe_copy_in()\n");
	res = rtio_sqe_copy_in_get_handles(r, &sqe, &handle, 1);
	zassert_ok(res);

	TC_PRINT("submit with wait, handle=%p\n", handle);
	res = rtio_submit(r, 1);
	zassert_ok(res, "Should return ok from rtio_submit");

	TC_PRINT("Calling rtio_cqe_copy_out\n");
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	zassert_ok(cqe.result, "Result should be ok but got %d", cqe.result);
	zassert_equal_ptr(cqe.userdata, mempool_data, "Expected userdata back");

	uint8_t *buffer = NULL;
	uint32_t buffer_len = 0;

	TC_PRINT("Calling rtio_cqe_get_mempool_buffer\n");
	zassert_ok(rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len));

	zassert_not_null(buffer, "Expected an allocated mempool buffer");
	zassert_equal(buffer_len, MEM_BLK_SIZE);
	zassert_mem_equal(buffer, mempool_data, MEM_BLK_SIZE, "Data expected to be the same");
	TC_PRINT("Calling rtio_release_buffer\n");
	rtio_release_buffer(r, buffer, buffer_len);

	TC_PRINT("Waiting for next cqe\n");
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	zassert_ok(cqe.result, "Result should be ok but got %d", cqe.result);
	zassert_equal_ptr(cqe.userdata, mempool_data, "Expected userdata back");
	rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len);
	rtio_release_buffer(r, buffer, buffer_len);

	TC_PRINT("Canceling %p\n", handle);
	rtio_sqe_cancel(handle);
	/* Flush any pending CQEs */
	while (rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)) != 0) {
		rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len);
		rtio_release_buffer(r, buffer, buffer_len);
	}
}

ZTEST_USER(rtio_api, test_rtio_multishot)
{
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_simple_multishot_(&r_simple, i);
	}
}

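/**
 * @brief Test that a failed multishot read is not resubmitted
 *
 * Ensures that when the iodev reports an error, the multishot request
 * delivers the error result once and stops resubmitting itself.
 */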
ZTEST(rtio_api, test_rtio_multishot_are_not_resubmitted_when_failed)
{
	int res;
	struct rtio_sqe sqe;
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;
	struct rtio *r = &r_simple;
	uint8_t *buffer = NULL;
	uint32_t buffer_len = 0;

	for (int i = 0; i < MEM_BLK_SIZE; i++) {
		mempool_data[i] = i;
	}

	rtio_sqe_prep_read_multishot(&sqe, (struct rtio_iodev *)&iodev_test_simple, 0,
				     mempool_data);
	res = rtio_sqe_copy_in_get_handles(r, &sqe, &handle, 1);
	zassert_ok(res);

	rtio_iodev_test_set_result(&iodev_test_simple, -EIO);

	rtio_submit(r, 1);

	/* The multishot SQE should fail, deliver the result, and stop resubmitting */
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(100)));
	zassert_equal(cqe.result, -EIO, "Result should be %d but got %d", -EIO, cqe.result);

	/* No more CQEs coming as the submission should be aborted */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(100)),
		      "Should not get more CQEs after the error CQE");

	rtio_sqe_drop_all(r);

	/* Flush any pending CQEs */
	while (rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(1000)) != 0) {
		rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len);
		rtio_release_buffer(r, buffer, buffer_len);
	}
}

RTIO_DEFINE(r_transaction, SQE_POOL_SIZE, CQE_POOL_SIZE);

RTIO_IODEV_TEST_DEFINE(iodev_test_transaction0);
RTIO_IODEV_TEST_DEFINE(iodev_test_transaction1);
struct rtio_iodev *iodev_test_transaction[] = {&iodev_test_transaction0, &iodev_test_transaction1};

/**
 * @brief Test transaction requests
 *
 * Ensures that we can set up an RTIO context, enqueue transaction requests,
 * and receive completion events in the correct order given the transaction
 * flag and multiple devices where serialization isn't guaranteed.
 */
void test_rtio_transaction_(struct rtio *r)
{
	int res;
	uintptr_t userdata[2] = {0, 1};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	bool seen[2] = { 0 };
	uintptr_t cq_count = atomic_get(&r->cq_count);

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_transaction0, NULL);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, NULL, &userdata[0]);

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_transaction1, NULL);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, NULL, &userdata[1]);

	TC_PRINT("submitting userdata 0 %p, userdata 1 %p\n", &userdata[0], &userdata[1]);
	res = rtio_submit(r, 4);
	TC_PRINT("checking cq, completions available, count at start %lu, current count %lu\n",
		 cq_count, atomic_get(&r->cq_count));
	zassert_ok(res, "Should return ok from rtio_submit");
	zassert_equal(atomic_get(&r->cq_count) - cq_count, 4, "Should have 4 pending completions");

	for (int i = 0; i < 4; i++) {
		TC_PRINT("consume %d\n", i);
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");
		if (i % 2 == 0) {
			zassert_is_null(cqe->userdata);
			rtio_cqe_release(r, cqe);
			continue;
		}
		uintptr_t idx = *(uintptr_t *)cqe->userdata;

		TC_PRINT("userdata is %p, value %" PRIuPTR "\n", cqe->userdata, idx);
		zassert(idx == 0 || idx == 1, "idx should be 0 or 1");
		seen[idx] = true;
		rtio_cqe_release(r, cqe);
	}

	zassert_true(seen[0], "Should have seen transaction 0");
	zassert_true(seen[1], "Should have seen transaction 1");
}

ZTEST(rtio_api, test_rtio_transaction)
{
	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_transaction[i]);
	}

	TC_PRINT("rtio transaction simple\n");
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_(&r_transaction);
	}
}

ZTEST(rtio_api, test_rtio_cqe_count_overflow)
{
	/* atomic_t max value as `uintptr_t` */
	const atomic_t max_uval = UINTPTR_MAX;

	/* atomic_t max value as if it were a signed word `intptr_t` */
	const atomic_t max_sval = UINTPTR_MAX >> 1;

	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_transaction[i]);
	}

	TC_PRINT("rtio transaction CQE overflow\n");
	atomic_set(&r_transaction.cq_count, max_uval - 3);
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_(&r_transaction);
	}

	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_transaction[i]);
	}

	TC_PRINT("rtio transaction CQE overflow\n");
	atomic_set(&r_transaction.cq_count, max_sval - 3);
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_(&r_transaction);
	}
}

#define RTIO_DELAY_NUM_ELEMS 10

RTIO_DEFINE(r_delay, RTIO_DELAY_NUM_ELEMS, RTIO_DELAY_NUM_ELEMS);

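/**
 * @brief Test delay requests
 *
 * Submits a set of delay requests whose expirations are deliberately out of
 * submission order and ensures the completions arrive in expiration order,
 * matching expected_expiration_order below.
 */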
ZTEST(rtio_api, test_rtio_delay)
{
	int res;
	struct rtio *r = &r_delay;
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	uint8_t expected_expiration_order[RTIO_DELAY_NUM_ELEMS] = {4, 3, 2, 1, 0, 5, 6, 7, 8, 9};

	for (size_t i = 0; i < RTIO_DELAY_NUM_ELEMS; i++) {
		sqe = rtio_sqe_acquire(r);
		zassert_not_null(sqe, "Expected a valid sqe");

		/* Half of the delays will be earlier than the previous one submitted.
		 * The other half will be later.
		 */
		if (i < (RTIO_DELAY_NUM_ELEMS / 2)) {
			rtio_sqe_prep_delay(sqe, K_SECONDS(10 - i), (void *)i);
		} else {
			rtio_sqe_prep_delay(sqe, K_SECONDS(10 - 4 + i), (void *)i);
		}
	}

	res = rtio_submit(r, 0);
	zassert_ok(res, "Should return ok from rtio_submit");

	cqe = rtio_cqe_consume(r);
	zassert_is_null(cqe, "There should not be a cqe since delay has not expired");

	/* Wait until we expect the delays to start expiring */
	k_sleep(K_SECONDS(10 - (RTIO_DELAY_NUM_ELEMS / 2)));

	for (int i = 0; i < RTIO_DELAY_NUM_ELEMS; i++) {
		k_sleep(K_SECONDS(1));

		TC_PRINT("consume %d\n", i);
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");

		size_t expired_id = (size_t)(cqe->userdata);

		zassert_equal(expected_expiration_order[i], expired_id,
			      "Expected order not valid. Obtained: %d, expected: %d",
			      (int)expired_id, (int)expected_expiration_order[i]);

		rtio_cqe_release(r, cqe);

		cqe = rtio_cqe_consume(r);
		zassert_is_null(cqe, "There should not be a cqe since next delay has not expired");
	}
}

#define THROUGHPUT_ITERS 100000
RTIO_DEFINE(r_throughput, SQE_POOL_SIZE, CQE_POOL_SIZE);

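/**
 * @brief Measure round-trip throughput of the RTIO API
 *
 * Times THROUGHPUT_ITERS acquire/prep/submit/consume/release round trips of
 * a no-op request with no iodev attached and reports the average nanoseconds
 * per operation.
 */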
void _test_rtio_throughput(struct rtio *r)
{
	timing_t start_time, end_time;
	struct rtio_cqe *cqe;
	struct rtio_sqe *sqe;

	timing_init();
	timing_start();

	start_time = timing_counter_get();

	for (uint32_t i = 0; i < THROUGHPUT_ITERS; i++) {
		sqe = rtio_sqe_acquire(r);
		rtio_sqe_prep_nop(sqe, NULL, NULL);
		rtio_submit(r, 0);
		cqe = rtio_cqe_consume(r);
		rtio_cqe_release(r, cqe);
	}

	end_time = timing_counter_get();

	uint64_t cycles = timing_cycles_get(&start_time, &end_time);
	uint64_t ns = timing_cycles_to_ns(cycles);

	TC_PRINT("%llu ns for %d iterations, %llu ns per op\n",
		 ns, THROUGHPUT_ITERS, ns / THROUGHPUT_ITERS);
}

ZTEST(rtio_api, test_rtio_throughput)
{
	_test_rtio_throughput(&r_throughput);
}

RTIO_DEFINE(r_callback_chaining, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_IODEV_TEST_DEFINE(iodev_test_callback_chaining0);
static bool cb_no_cqe_run;

/**
 * Callback used for testing chained callbacks
 */
void rtio_callback_chaining_cb(struct rtio *r, const struct rtio_sqe *sqe, int result, void *arg0)
{
	TC_PRINT("chaining callback with result %d and userdata %p\n", result, arg0);
}

void rtio_callback_chaining_cb_no_cqe(struct rtio *r, const struct rtio_sqe *sqe,
				      int result, void *arg0)
{
	TC_PRINT("Chaining callback with result %d and userdata %p (No CQE)\n", result, arg0);
	cb_no_cqe_run = true;
}

/**
 * @brief Test callback chaining requests
 *
 * Ensures that we can set up an RTIO context, enqueue a transaction of requests,
 * receive completion events, and catch a callback at the end in the correct
 * order.
 */
void test_rtio_callback_chaining_(struct rtio *r)
{
	int res;
	int32_t userdata[4] = {0, 1, 2, 3};
	int32_t ordering[4] = { -1, -1, -1, -1};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	uintptr_t cq_count = atomic_get(&r->cq_count);

	rtio_iodev_test_init(&iodev_test_callback_chaining0);

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_callback(sqe, &rtio_callback_chaining_cb, sqe, &userdata[0]);
	sqe->flags |= RTIO_SQE_CHAINED;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_callback_chaining0, &userdata[1]);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_callback_chaining0, &userdata[2]);
	sqe->flags |= RTIO_SQE_CHAINED;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_callback_no_cqe(sqe, &rtio_callback_chaining_cb_no_cqe, sqe, NULL);
	sqe->flags |= RTIO_SQE_CHAINED;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_callback(sqe, &rtio_callback_chaining_cb, sqe, &userdata[3]);

	TC_PRINT("submitting\n");
	res = rtio_submit(r, 4);
	TC_PRINT("checking cq, completions available, count at start %lu, current count %lu\n",
		 cq_count, atomic_get(&r->cq_count));
	zassert_ok(res, "Should return ok from rtio_submit");
	zassert_equal(atomic_get(&r->cq_count) - cq_count, 4, "Should have 4 pending completions");
	zassert_true(cb_no_cqe_run, "Callback without CQE should have run");

	for (int i = 0; i < 4; i++) {
		TC_PRINT("consume %d\n", i);
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");

		int32_t idx = *(int32_t *)cqe->userdata;

		TC_PRINT("userdata is %p, value %d\n", cqe->userdata, idx);
		ordering[idx] = i;

		rtio_cqe_release(r, cqe);
	}

	for (int i = 0; i < 4; i++) {
		zassert_equal(ordering[i], i,
			      "Expected ordering of completions to match submissions");
	}
}

ZTEST(rtio_api, test_rtio_callback_chaining)
{
	test_rtio_callback_chaining_(&r_callback_chaining);
}

RTIO_DEFINE(r_await0, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_DEFINE(r_await1, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_IODEV_TEST_DEFINE(iodev_test_await0);

/**
 * @brief Test early signalling on await requests
 *
 * Ensures that the AWAIT operation will be skipped if rtio_sqe_signal() was
 * called before the AWAIT SQE is executed.
 */
void test_rtio_await_early_signal_(struct rtio *r)
{
	int res;
	int32_t userdata = 0;
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	rtio_iodev_test_init(&iodev_test_await0);

	TC_PRINT("Prepare await sqe\n");
	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_await(sqe, &iodev_test_await0, RTIO_PRIO_LOW, &userdata);
	sqe->flags = 0;

	TC_PRINT("Signal await sqe prior to submission\n");
	rtio_sqe_signal(sqe);

	TC_PRINT("Submit await sqe\n");
	res = rtio_submit(r, 0);
	zassert_ok(res, "Submission failed");

	TC_PRINT("Ensure await sqe completed\n");
	cqe = rtio_cqe_consume_block(r);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata);
	rtio_cqe_release(r, cqe);
}

/**
 * @brief Test blocking an rtio_iodev using await requests
 *
 * Ensures we can block execution of an RTIO iodev using the AWAIT operation,
 * and unblock it by calling rtio_sqe_signal().
 */
void test_rtio_await_iodev_(struct rtio *rtio0, struct rtio *rtio1)
{
	int res;
	int32_t userdata[3] = {0, 1, 2};
	struct rtio_sqe *await_sqe;
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	rtio_iodev_test_init(&iodev_test_await0);

	sqe = rtio_sqe_acquire(rtio0);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_await0, &userdata[0]);
	sqe->flags = RTIO_SQE_TRANSACTION;

	await_sqe = rtio_sqe_acquire(rtio0);
	zassert_not_null(await_sqe, "Expected a valid sqe");
	rtio_sqe_prep_await(await_sqe, &iodev_test_await0, RTIO_PRIO_LOW, &userdata[1]);
	await_sqe->flags = 0;

	sqe = rtio_sqe_acquire(rtio1);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_await0, &userdata[2]);
	sqe->prio = RTIO_PRIO_HIGH;
	sqe->flags = 0;

	TC_PRINT("Submitting await sqe from rtio0\n");
	res = rtio_submit(rtio0, 0);
	zassert_ok(res, "Submission failed");

	TC_PRINT("Ensure rtio0 has started execution\n");
	k_sleep(K_MSEC(20));

	TC_PRINT("Submitting sqe from rtio1\n");
	res = rtio_submit(rtio1, 0);
	zassert_ok(res, "Submission failed");

	TC_PRINT("Ensure sqe from rtio1 has not completed\n");
	k_sleep(K_MSEC(100));
	cqe = rtio_cqe_consume(rtio1);
	zassert_equal(cqe, NULL, "Expected no valid cqe");

	TC_PRINT("Signal await sqe from rtio0\n");
	rtio_sqe_signal(await_sqe);

	TC_PRINT("Ensure both sqe from rtio0 completed\n");
	cqe = rtio_cqe_consume_block(rtio0);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[0]);
	rtio_cqe_release(rtio0, cqe);
	cqe = rtio_cqe_consume_block(rtio0);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[1]);
	rtio_cqe_release(rtio0, cqe);

	TC_PRINT("Ensure sqe from rtio1 completed\n");
	cqe = rtio_cqe_consume_block(rtio1);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[2]);
	rtio_cqe_release(rtio1, cqe);
}

/**
 * @brief Test await operations handled purely by the executor
 *
 * Ensures we can pause just one SQE chain using the AWAIT operation, letting the rtio_iodev serve
 * other sequences during the wait, and finally resume the executor by calling rtio_sqe_signal().
 */
void test_rtio_await_executor_(struct rtio *rtio0, struct rtio *rtio1)
{
	int res;
	int32_t userdata[4] = {0, 1, 2, 3};
	struct rtio_sqe *await_sqe;
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	rtio_iodev_test_init(&iodev_test_await0);

	/* Prepare a NOP->AWAIT chain on rtio0 to verify the blocking behavior of AWAIT */
	sqe = rtio_sqe_acquire(rtio0);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_await0, &userdata[0]);
	sqe->flags = RTIO_SQE_CHAINED;

	await_sqe = rtio_sqe_acquire(rtio0);
	zassert_not_null(await_sqe, "Expected a valid sqe");
	rtio_sqe_prep_await_executor(await_sqe, RTIO_PRIO_LOW, &userdata[1]);
	await_sqe->flags = 0;

	/*
	 * Prepare another NOP on rtio0, to verify that while the await is busy, the executor
	 * can process an unconnected operation
	 */
	sqe = rtio_sqe_acquire(rtio0);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_await0, &userdata[3]);
	sqe->flags = 0;

	/* Prepare a NOP sqe on rtio1 */
	sqe = rtio_sqe_acquire(rtio1);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_await0, &userdata[2]);
	sqe->prio = RTIO_PRIO_HIGH;
	sqe->flags = 0;

	/* Submit the rtio0 sequence and make sure it reaches the AWAIT sqe */
	TC_PRINT("Submitting await sqe from rtio0\n");
	res = rtio_submit(rtio0, 0);
	zassert_ok(res, "Submission failed");

	TC_PRINT("Wait for the nop sqe from rtio0 to complete\n");
	cqe = rtio_cqe_consume_block(rtio0);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[0]);
	rtio_cqe_release(rtio0, cqe);

	/* Submit rtio1 sequence and ensure it completes while rtio0 is paused at the AWAIT */
	TC_PRINT("Submitting sqe from rtio1\n");
	res = rtio_submit(rtio1, 0);
	zassert_ok(res, "Submission failed");

	TC_PRINT("Ensure sqe from rtio1 completes\n");
	cqe = rtio_cqe_consume_block(rtio1);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[2]);
	rtio_cqe_release(rtio1, cqe);

	/* Verify that rtio0 processes the freestanding NOP during the await */
	TC_PRINT("Ensure freestanding NOP completes while await is busy\n");
	cqe = rtio_cqe_consume_block(rtio0);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[3]);
	rtio_cqe_release(rtio0, cqe);

	/* Make sure rtio0 is still paused at the AWAIT and finally complete it */
	TC_PRINT("Ensure await_sqe is not completed unintentionally\n");
	cqe = rtio_cqe_consume(rtio0);
	zassert_equal(cqe, NULL, "Expected no valid cqe");

	TC_PRINT("Signal await sqe from rtio0\n");
	rtio_sqe_signal(await_sqe);

	TC_PRINT("Ensure sqe from rtio0 completed\n");
	cqe = rtio_cqe_consume_block(rtio0);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_equal(cqe->userdata, &userdata[1]);
	rtio_cqe_release(rtio0, cqe);
}

ZTEST(rtio_api, test_rtio_await)
{
	test_rtio_await_early_signal_(&r_await0);
	test_rtio_await_iodev_(&r_await0, &r_await1);
	test_rtio_await_executor_(&r_await0, &r_await1);
}

RTIO_DEFINE(r_callback_result, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_IODEV_TEST_DEFINE(iodev_test_callback_result);
static int callback_count;
static int callback_result;
static int expected_callback_result;

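/* Callback that injects expected_callback_result as the test iodev's next result */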
void callback_update_data(struct rtio *r, const struct rtio_sqe *sqe, int result, void *arg0)
{
	_iodev_data_iodev_test_callback_result.result = expected_callback_result;
	callback_count++;
}

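/* Callback that stashes the result it was given for later inspection */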
void callback_stash_result(struct rtio *r, const struct rtio_sqe *sqe, int result, void *arg0)
{
	callback_result = result;
	callback_count++;
}

/*
 * Ensure callbacks work as expected:
 *
 * 1. Callbacks always occur
 * 2. The result code always contains the first error result
 */
ZTEST(rtio_api, test_rtio_callbacks)
{
	struct rtio *r = &r_callback_result;
	struct rtio_iodev *iodev = &iodev_test_callback_result;
	struct rtio_sqe *nop1 = rtio_sqe_acquire(r);
	struct rtio_sqe *cb1 = rtio_sqe_acquire(r);
	struct rtio_sqe *nop2 = rtio_sqe_acquire(r);
	struct rtio_sqe *nop3 = rtio_sqe_acquire(r);
	struct rtio_sqe *cb2 = rtio_sqe_acquire(r);

	rtio_iodev_test_init(&iodev_test_callback_result);

	callback_result = 0;
	callback_count = 0;
	expected_callback_result = -EIO;

	rtio_sqe_prep_nop(nop1, iodev, NULL);
	nop1->flags |= RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(cb1, callback_update_data, NULL, NULL);
	cb1->flags |= RTIO_SQE_CHAINED;
	rtio_sqe_prep_nop(nop2, iodev, NULL);
	nop2->flags |= RTIO_SQE_CHAINED;
	rtio_sqe_prep_nop(nop3, iodev, NULL);
	nop3->flags |= RTIO_SQE_CHAINED;
	rtio_sqe_prep_callback(cb2, callback_stash_result, NULL, NULL);

	rtio_submit(r, 5);

	zassert_equal(callback_result, expected_callback_result,
		      "expected the result given to the second callback to be the predefined error");
	zassert_equal(callback_count, 2, "expected two callbacks to complete");
}

RTIO_DEFINE(r_acquire_array, SQE_POOL_SIZE, CQE_POOL_SIZE);

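/**
 * @brief Test bulk SQE acquisition
 *
 * Ensures rtio_sqe_acquire_array() hands out the requested number of SQEs
 * when available and returns -ENOMEM, leaving the pool untouched, when it
 * cannot satisfy the whole request.
 */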
ZTEST(rtio_api, test_rtio_acquire_array)
{
	TC_PRINT("rtio acquire array\n");

	struct rtio_sqe *sqes[SQE_POOL_SIZE];

	int res = rtio_sqe_acquire_array(&r_acquire_array, SQE_POOL_SIZE, sqes);

	zassert_ok(res, "Expected to acquire sqes");

	struct rtio_sqe *last_sqe;

	res = rtio_sqe_acquire_array(&r_acquire_array, 1, &last_sqe);
	zassert_equal(res, -ENOMEM, "Expected to have no more sqes available");

	rtio_sqe_drop_all(&r_acquire_array);

	res = rtio_sqe_acquire_array(&r_acquire_array, SQE_POOL_SIZE - 1, sqes);
	zassert_ok(res, "Expected to acquire sqes");
	res = rtio_sqe_acquire_array(&r_acquire_array, 2, &last_sqe);
	zassert_equal(res, -ENOMEM, "Expected to have only a single sqe available");
	res = rtio_sqe_acquire_array(&r_acquire_array, 1, &last_sqe);
	zassert_equal(res, 0, "Expected a single sqe available");
	res = rtio_sqe_acquire_array(&r_acquire_array, 1, &last_sqe);
	zassert_equal(res, -ENOMEM, "Expected to have no more sqes available");

	rtio_sqe_drop_all(&r_acquire_array);
}

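/*
 * Suite setup: when userspace is enabled, build a memory domain containing
 * the RTIO partition (and the libc partition if present) so user mode test
 * threads can access the test data.
 */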
static void *rtio_api_setup(void)
{
#ifdef CONFIG_USERSPACE
	k_mem_domain_init(&rtio_domain, 0, NULL);
	k_mem_domain_add_partition(&rtio_domain, &rtio_partition);
#if Z_LIBC_PARTITION_EXISTS
	k_mem_domain_add_partition(&rtio_domain, &z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */
#endif /* CONFIG_USERSPACE */

	return NULL;
}

static void rtio_api_before(void *a)
{
	ARG_UNUSED(a);

	STRUCT_SECTION_FOREACH(rtio, r)
	{
		struct rtio_cqe cqe;

		while (rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15))) {
		}
	}

	rtio_iodev_test_init(&iodev_test_simple);
	rtio_iodev_test_init(&iodev_test_syscall);
#ifdef CONFIG_USERSPACE
	k_mem_domain_add_thread(&rtio_domain, k_current_get());
	rtio_access_grant(&r_simple, k_current_get());
	rtio_access_grant(&r_syscall, k_current_get());
	k_object_access_grant(&iodev_test_simple, k_current_get());
	k_object_access_grant(&iodev_test_syscall, k_current_get());
#endif
}

ZTEST_SUITE(rtio_api, NULL, rtio_api_setup, rtio_api_before, NULL, NULL);