/*
 * Copyright (c) 2021 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/kobject.h>
#include <zephyr/sys/libc-hooks.h>
#include <zephyr/app_memory/mem_domain.h>
#include <zephyr/sys/util_loops.h>
#include <zephyr/sys/time_units.h>
#include <zephyr/timing/timing.h>
#include <zephyr/rtio/rtio.h>

#include "rtio_iodev_test.h"

/* Repeat tests to ensure they are repeatable */
#define TEST_REPEATS 4

#define MEM_BLK_COUNT 4
#define MEM_BLK_SIZE 16
#define MEM_BLK_ALIGN 4

#define SQE_POOL_SIZE 5
#define CQE_POOL_SIZE 5

/*
 * Purposefully double the block count and halve the block size. This keeps the total mempool
 * size the same, but ensures that allocations span multiple blocks because the tests assume a
 * larger block size.
 */
RTIO_DEFINE_WITH_MEMPOOL(r_simple, SQE_POOL_SIZE, CQE_POOL_SIZE, MEM_BLK_COUNT * 2,
			 MEM_BLK_SIZE / 2, MEM_BLK_ALIGN);

RTIO_IODEV_TEST_DEFINE(iodev_test_simple);

/**
 * @brief Test the basics of the RTIO API
 *
 * Ensures that we can set up an RTIO context, enqueue a request, and receive
 * a completion event.
 */
void test_rtio_simple_(struct rtio *r)
{
	int res;
	uintptr_t userdata[2] = {0, 1};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	rtio_iodev_test_init(&iodev_test_simple);

	TC_PRINT("setting up single no-op\n");
	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, (struct rtio_iodev *)&iodev_test_simple, &userdata[0]);

	TC_PRINT("submit with wait\n");
	res = rtio_submit(r, 1);
	zassert_ok(res, "Should return ok from rtio_submit");

	cqe = rtio_cqe_consume(r);
	zassert_not_null(cqe, "Expected a valid cqe");
	zassert_ok(cqe->result, "Result should be ok");
	zassert_equal_ptr(cqe->userdata, &userdata[0], "Expected userdata back");
	rtio_cqe_release(r, cqe);
}

ZTEST(rtio_api, test_rtio_simple)
{
	TC_PRINT("rtio simple simple\n");
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_simple_(&r_simple);
	}
}

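/**
 * @brief Test a no-op submission flagged with RTIO_SQE_NO_RESPONSE
 *
 * Ensures that a submission which opts out of a completion notification
 * produces no CQE, even after waiting.
 */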
ZTEST(rtio_api, test_rtio_no_response)
{
	int res;
	uintptr_t userdata[2] = {0, 1};
	struct rtio_sqe *sqe;
	struct rtio_cqe cqe;

	rtio_iodev_test_init(&iodev_test_simple);

	sqe = rtio_sqe_acquire(&r_simple);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, (struct rtio_iodev *)&iodev_test_simple, &userdata[0]);
	sqe->flags |= RTIO_SQE_NO_RESPONSE;

	res = rtio_submit(&r_simple, 0);
	zassert_ok(res, "Should return ok from rtio_submit");

	res = rtio_cqe_copy_out(&r_simple, &cqe, 1, K_MSEC(500));
	zassert_equal(0, res, "Expected no CQEs");
}

RTIO_DEFINE(r_chain, SQE_POOL_SIZE, CQE_POOL_SIZE);

RTIO_IODEV_TEST_DEFINE(iodev_test_chain0);
RTIO_IODEV_TEST_DEFINE(iodev_test_chain1);
struct rtio_iodev *iodev_test_chain[] = {&iodev_test_chain0, &iodev_test_chain1};

/**
 * @brief Test chained requests
 *
 * Ensures that we can set up an RTIO context, enqueue chained requests,
 * and receive completion events in the correct order given the chained
 * flag and multiple devices where serialization isn't guaranteed.
 */
void test_rtio_chain_(struct rtio *r)
{
	int res;
	uint32_t userdata[4] = {0, 1, 2, 3};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	uintptr_t cq_count = atomic_get(&r->cq_count);

	for (int i = 0; i < 4; i++) {
		sqe = rtio_sqe_acquire(r);
		zassert_not_null(sqe, "Expected a valid sqe");
		rtio_sqe_prep_nop(sqe, iodev_test_chain[i % 2], &userdata[i]);
		sqe->flags |= RTIO_SQE_CHAINED;
		TC_PRINT("produce %d, sqe %p, userdata %d\n", i, sqe, userdata[i]);
	}

	/* Clear the flags on the last submission to terminate the chain */
	sqe->flags = 0;

	TC_PRINT("submitting\n");

	res = rtio_submit(r, 4);
	TC_PRINT("checking cq\n");
	zassert_ok(res, "Should return ok from rtio_submit");
	zassert_equal(atomic_get(&r->cq_count) - cq_count, 4, "Should have 4 pending completions");

	for (int i = 0; i < 4; i++) {
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		TC_PRINT("consume %d, cqe %p, userdata %d\n", i, cqe, *(uint32_t *)cqe->userdata);
		zassert_ok(cqe->result, "Result should be ok");

		zassert_equal_ptr(cqe->userdata, &userdata[i], "Expected in order completions");
		rtio_cqe_release(r, cqe);
	}
}

ZTEST(rtio_api, test_rtio_chain)
{
	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_chain[i]);
	}

	TC_PRINT("rtio chain simple\n");
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_chain_(&r_chain);
	}
}

RTIO_DEFINE(r_multi_chain, SQE_POOL_SIZE, CQE_POOL_SIZE);

RTIO_IODEV_TEST_DEFINE(iodev_test_multi0);
RTIO_IODEV_TEST_DEFINE(iodev_test_multi1);
struct rtio_iodev *iodev_test_multi[] = {&iodev_test_multi0, &iodev_test_multi1};

/**
 * @brief Test multiple asynchronous chains, one against each test iodev
 */
void test_rtio_multiple_chains_(struct rtio *r)
{
	int res;
	uintptr_t userdata[4] = {0, 1, 2, 3};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe = NULL;

	for (int i = 0; i < 2; i++) {
		for (int j = 0; j < 2; j++) {
			sqe = rtio_sqe_acquire(r);
			zassert_not_null(sqe, "Expected a valid sqe");
			rtio_sqe_prep_nop(sqe, iodev_test_multi[i],
					  (void *)userdata[i * 2 + j]);
			if (j == 0) {
				sqe->flags |= RTIO_SQE_CHAINED;
			} else {
				sqe->flags |= 0;
			}
		}
	}

	TC_PRINT("calling submit from test case\n");
	res = rtio_submit(r, 0);
	zassert_ok(res, "Should return ok from rtio_submit");

	bool seen[4] = { 0 };

	TC_PRINT("waiting for 4 completions\n");
	for (int i = 0; i < 4; i++) {
		TC_PRINT("waiting on completion %d\n", i);

		cqe = rtio_cqe_consume(r);
		while (cqe == NULL) {
			k_sleep(K_MSEC(1));
			cqe = rtio_cqe_consume(r);
		}

		TC_PRINT("consumed cqe %p, result %d, userdata %lu\n", cqe,
			 cqe->result, (uintptr_t)cqe->userdata);

		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");
		seen[(uintptr_t)cqe->userdata] = true;
		if (seen[1]) {
			zassert_true(seen[0], "Should see 0 before 1");
		}
		if (seen[3]) {
			zassert_true(seen[2], "Should see 2 before 3");
		}
		rtio_cqe_release(r, cqe);
	}
}

ZTEST(rtio_api, test_rtio_multiple_chains)
{
	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_multi[i]);
	}

	TC_PRINT("rtio multiple chains\n");
	test_rtio_multiple_chains_(&r_multi_chain);
}

#ifdef CONFIG_USERSPACE
struct k_mem_domain rtio_domain;
#endif

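/*
 * Shared test buffers are placed in the RTIO memory partition via RTIO_BMEM
 * so that, with CONFIG_USERSPACE enabled, user mode test threads added to
 * rtio_domain can access them.
 */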
RTIO_BMEM uint8_t syscall_bufs[4];

RTIO_DEFINE(r_syscall, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_IODEV_TEST_DEFINE(iodev_test_syscall);

ZTEST_USER(rtio_api, test_rtio_syscalls)
{
	int res;
	struct rtio_sqe sqe = {0};
	struct rtio_cqe cqe = {0};

	struct rtio *r = &r_syscall;

	for (int i = 0; i < 4; i++) {
		TC_PRINT("copying sqe in from stack\n");
		/* Not really legal from userspace! Ugh */
		rtio_sqe_prep_nop(&sqe, &iodev_test_syscall,
				  &syscall_bufs[i]);
		res = rtio_sqe_copy_in(r, &sqe, 1);
		zassert_equal(res, 0, "Expected success copying sqe");
	}

	TC_PRINT("submitting\n");
	res = rtio_submit(r, 4);
	zassert_ok(res, "Should return ok from rtio_submit");

	for (int i = 0; i < 4; i++) {
		TC_PRINT("consume %d\n", i);
		res = rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER);
		zassert_equal(res, 1, "Expected success copying cqe");
		zassert_ok(cqe.result, "Result should be ok");
		zassert_equal_ptr(cqe.userdata, &syscall_bufs[i],
				  "Expected in order completions");
	}
}

RTIO_BMEM uint8_t mempool_data[MEM_BLK_SIZE];

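/**
 * @brief Test a single mempool-backed read
 *
 * Ensures a read prepared with rtio_sqe_prep_read_with_pool() completes, and
 * that the mempool buffer backing the completion can be fetched with
 * rtio_cqe_get_mempool_buffer() and returned with rtio_release_buffer().
 */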
static void test_rtio_simple_mempool_(struct rtio *r, int run_count)
{
	int res;
	struct rtio_sqe sqe = {0};
	struct rtio_cqe cqe = {0};

	for (int i = 0; i < MEM_BLK_SIZE; ++i) {
		mempool_data[i] = i + run_count;
	}

	TC_PRINT("setting up single mempool read %p\n", r);
	rtio_sqe_prep_read_with_pool(&sqe, (struct rtio_iodev *)&iodev_test_simple, 0,
				     mempool_data);
	TC_PRINT("Calling rtio_sqe_copy_in()\n");
	res = rtio_sqe_copy_in(r, &sqe, 1);
	zassert_ok(res);

	TC_PRINT("submit with wait\n");
	res = rtio_submit(r, 1);
	zassert_ok(res, "Should return ok from rtio_submit");

	TC_PRINT("Calling rtio_cqe_copy_out\n");
	res = rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER);
	zassert_equal(1, res);
	TC_PRINT("cqe result %d, userdata %p\n", cqe.result, cqe.userdata);
	zassert_ok(cqe.result, "Result should be ok");
	zassert_equal_ptr(cqe.userdata, mempool_data, "Expected userdata back");

	uint8_t *buffer = NULL;
	uint32_t buffer_len = 0;

	TC_PRINT("Calling rtio_cqe_get_mempool_buffer\n");
	zassert_ok(rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len));

	zassert_not_null(buffer, "Expected an allocated mempool buffer");
	zassert_equal(buffer_len, MEM_BLK_SIZE);
	zassert_mem_equal(buffer, mempool_data, MEM_BLK_SIZE, "Data expected to be the same");
	TC_PRINT("Calling rtio_release_buffer\n");
	rtio_release_buffer(r, buffer, buffer_len);
}

ZTEST_USER(rtio_api, test_rtio_simple_mempool)
{
	for (int i = 0; i < TEST_REPEATS * 2; i++) {
		test_rtio_simple_mempool_(&r_simple, i);
	}
}

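/**
 * @brief Test cancelling a single queued submission
 *
 * Ensures a cancelled SQE produces no CQE and that its slot is returned to
 * the SQE pool afterwards.
 */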
static void test_rtio_simple_cancel_(struct rtio *r)
{
	struct rtio_sqe sqe[SQE_POOL_SIZE];
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	rtio_sqe_prep_nop(sqe, (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 1);
	rtio_sqe_cancel(handle);
	TC_PRINT("Submitting 1 to RTIO\n");
	rtio_submit(r, 0);

	/* Check that we don't get a CQE */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}
}

ZTEST_USER(rtio_api, test_rtio_simple_cancel)
{
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_simple_cancel_(&r_simple);
	}
}

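/**
 * @brief Test cancelling chained submissions
 *
 * Ensures that cancelling the head of a chain suppresses every completion in
 * the chain, and that cancelling a middle SQE still lets the entry ahead of
 * it complete while suppressing the rest.
 */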
static void test_rtio_chain_cancel_(struct rtio *r)
{
	struct rtio_sqe sqe[SQE_POOL_SIZE];
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	/* Prepare the chain */
	rtio_sqe_prep_nop(&sqe[0], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[1], (struct rtio_iodev *)&iodev_test_simple, NULL);
	sqe[0].flags |= RTIO_SQE_CHAINED;

	/* Copy in the chain and cancel it from the head */
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 2);
	rtio_sqe_cancel(handle);
	k_msleep(20);
	rtio_submit(r, 0);

	/* Check that we don't get cancelled completion notifications */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}

	/* Try cancelling the middle sqe in a chain */
	rtio_sqe_prep_nop(&sqe[0], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[1], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[2], (struct rtio_iodev *)&iodev_test_simple, NULL);
	sqe[0].flags |= RTIO_SQE_CHAINED;
	sqe[1].flags |= RTIO_SQE_CHAINED | RTIO_SQE_CANCELED;

	/* Copy in the chain; only the first, non-cancelled sqe should complete */
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 3);
	rtio_submit(r, 1);

	/* Check that we get one completion and no cancellation notifications */
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that we get no more completions for the cancelled submissions */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}
}

ZTEST_USER(rtio_api, test_rtio_chain_cancel)
{
	TC_PRINT("start test\n");
	k_msleep(20);
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_chain_cancel_(&r_simple);
	}
}

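/**
 * @brief Test cancelling a transaction
 *
 * Ensures that cancelling the head of a transaction suppresses all of its
 * completions and returns its SQEs to the pool.
 */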
static void test_rtio_transaction_cancel_(struct rtio *r)
{
	struct rtio_sqe sqe[SQE_POOL_SIZE];
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	/* Prepare the transaction */
	rtio_sqe_prep_nop(&sqe[0], (struct rtio_iodev *)&iodev_test_simple, NULL);
	rtio_sqe_prep_nop(&sqe[1], (struct rtio_iodev *)&iodev_test_simple, NULL);
	sqe[0].flags |= RTIO_SQE_TRANSACTION;

	/* Copy in the transaction and cancel it from the head */
	rtio_sqe_copy_in_get_handles(r, sqe, &handle, 2);
	rtio_sqe_cancel(handle);
	TC_PRINT("Submitting 2 to RTIO\n");
	rtio_submit(r, 0);

	/* Check that we don't get a CQE */
	zassert_equal(0, rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)));

	/* Check that the SQE pool is empty by filling it all the way */
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		rtio_sqe_prep_nop(&sqe[i], (struct rtio_iodev *)&iodev_test_simple, NULL);
	}
	zassert_ok(rtio_sqe_copy_in(r, sqe, SQE_POOL_SIZE));

	/* Since there's no good way to just reset the RTIO context, wait for the nops to finish */
	rtio_submit(r, SQE_POOL_SIZE);
	for (int i = 0; i < SQE_POOL_SIZE; ++i) {
		zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	}
}

ZTEST_USER(rtio_api, test_rtio_transaction_cancel)
{
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_cancel_(&r_simple);
	}
}

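/**
 * @brief Test a multishot mempool read
 *
 * Ensures a single multishot SQE keeps producing completions, each backed by
 * a retrievable mempool buffer, until it is cancelled.
 */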
static inline void test_rtio_simple_multishot_(struct rtio *r, int idx)
{
	int res;
	struct rtio_sqe sqe;
	struct rtio_cqe cqe;
	struct rtio_sqe *handle;

	for (int i = 0; i < MEM_BLK_SIZE; ++i) {
		mempool_data[i] = i + idx;
	}

	TC_PRINT("setting up single mempool read\n");
	rtio_sqe_prep_read_multishot(&sqe, (struct rtio_iodev *)&iodev_test_simple, 0,
				     mempool_data);
	TC_PRINT("Calling rtio_sqe_copy_in()\n");
	res = rtio_sqe_copy_in_get_handles(r, &sqe, &handle, 1);
	zassert_ok(res);

	TC_PRINT("submit with wait, handle=%p\n", handle);
	res = rtio_submit(r, 1);
	zassert_ok(res, "Should return ok from rtio_submit");

	TC_PRINT("Calling rtio_cqe_copy_out\n");
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	zassert_ok(cqe.result, "Result should be ok but got %d", cqe.result);
	zassert_equal_ptr(cqe.userdata, mempool_data, "Expected userdata back");

	uint8_t *buffer = NULL;
	uint32_t buffer_len = 0;

	TC_PRINT("Calling rtio_cqe_get_mempool_buffer\n");
	zassert_ok(rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len));

	zassert_not_null(buffer, "Expected an allocated mempool buffer");
	zassert_equal(buffer_len, MEM_BLK_SIZE);
	zassert_mem_equal(buffer, mempool_data, MEM_BLK_SIZE, "Data expected to be the same");
	TC_PRINT("Calling rtio_release_buffer\n");
	rtio_release_buffer(r, buffer, buffer_len);

	TC_PRINT("Waiting for next cqe\n");
	zassert_equal(1, rtio_cqe_copy_out(r, &cqe, 1, K_FOREVER));
	zassert_ok(cqe.result, "Result should be ok but got %d", cqe.result);
	zassert_equal_ptr(cqe.userdata, mempool_data, "Expected userdata back");
	rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len);
	rtio_release_buffer(r, buffer, buffer_len);

	TC_PRINT("Canceling %p\n", handle);
	rtio_sqe_cancel(handle);
	/* Flush any pending CQEs */
	while (rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15)) != 0) {
		rtio_cqe_get_mempool_buffer(r, &cqe, &buffer, &buffer_len);
		rtio_release_buffer(r, buffer, buffer_len);
	}
}

ZTEST_USER(rtio_api, test_rtio_multishot)
{
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_simple_multishot_(&r_simple, i);
	}
}

RTIO_DEFINE(r_transaction, SQE_POOL_SIZE, CQE_POOL_SIZE);

RTIO_IODEV_TEST_DEFINE(iodev_test_transaction0);
RTIO_IODEV_TEST_DEFINE(iodev_test_transaction1);
struct rtio_iodev *iodev_test_transaction[] = {&iodev_test_transaction0, &iodev_test_transaction1};

/**
 * @brief Test transaction requests
 *
 * Ensures that we can set up an RTIO context, enqueue transaction requests,
 * and receive completion events in the correct order given the transaction
 * flag and multiple devices where serialization isn't guaranteed.
 */
void test_rtio_transaction_(struct rtio *r)
{
	int res;
	uintptr_t userdata[2] = {0, 1};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	bool seen[2] = { 0 };
	uintptr_t cq_count = atomic_get(&r->cq_count);

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_transaction0, NULL);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, NULL, &userdata[0]);

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_transaction1, NULL);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, NULL, &userdata[1]);

	TC_PRINT("submitting userdata 0 %p, userdata 1 %p\n", &userdata[0], &userdata[1]);
	res = rtio_submit(r, 4);
	TC_PRINT("checking cq, completions available, count at start %lu, current count %lu\n",
		 cq_count, atomic_get(&r->cq_count));
	zassert_ok(res, "Should return ok from rtio_submit");
	zassert_equal(atomic_get(&r->cq_count) - cq_count, 4, "Should have 4 pending completions");

	for (int i = 0; i < 4; i++) {
		TC_PRINT("consume %d\n", i);
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");
		if (i % 2 == 0) {
			zassert_is_null(cqe->userdata);
			rtio_cqe_release(r, cqe);
			continue;
		}
		uintptr_t idx = *(uintptr_t *)cqe->userdata;

		TC_PRINT("userdata is %p, value %" PRIuPTR "\n", cqe->userdata, idx);
		zassert(idx == 0 || idx == 1, "idx should be 0 or 1");
		seen[idx] = true;
		rtio_cqe_release(r, cqe);
	}

	zassert_true(seen[0], "Should have seen transaction 0");
	zassert_true(seen[1], "Should have seen transaction 1");
}

ZTEST(rtio_api, test_rtio_transaction)
{
	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_transaction[i]);
	}

	TC_PRINT("rtio transaction simple\n");
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_(&r_transaction);
	}
}

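/**
 * @brief Test completion counting across cq_count overflow
 *
 * Seeds cq_count just below the unsigned and signed wraparound points of
 * atomic_t and reruns the transaction test, so the pending-completion math
 * must remain correct as the counter overflows.
 */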
ZTEST(rtio_api, test_rtio_cqe_count_overflow)
{
	/* atomic_t max value as `uintptr_t` */
	const atomic_t max_uval = UINTPTR_MAX;

	/* atomic_t max value as if it were a signed word `intptr_t` */
	const atomic_t max_sval = UINTPTR_MAX >> 1;

	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_transaction[i]);
	}

	TC_PRINT("rtio transaction CQE overflow (unsigned)\n");
	atomic_set(&r_transaction.cq_count, max_uval - 3);
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_(&r_transaction);
	}

	TC_PRINT("initializing iodev test devices\n");

	for (int i = 0; i < 2; i++) {
		rtio_iodev_test_init(iodev_test_transaction[i]);
	}

	TC_PRINT("rtio transaction CQE overflow (signed)\n");
	atomic_set(&r_transaction.cq_count, max_sval - 3);
	for (int i = 0; i < TEST_REPEATS; i++) {
		test_rtio_transaction_(&r_transaction);
	}
}

#define THROUGHPUT_ITERS 100000
RTIO_DEFINE(r_throughput, SQE_POOL_SIZE, CQE_POOL_SIZE);

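/**
 * @brief Measure the round-trip cost of a no-op submission
 *
 * Informational only: times THROUGHPUT_ITERS submit/consume cycles with the
 * timing API and prints the average cost per operation, with no assertions.
 */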
void _test_rtio_throughput(struct rtio *r)
{
	timing_t start_time, end_time;
	struct rtio_cqe *cqe;
	struct rtio_sqe *sqe;

	timing_init();
	timing_start();

	start_time = timing_counter_get();

	for (uint32_t i = 0; i < THROUGHPUT_ITERS; i++) {
		sqe = rtio_sqe_acquire(r);
		rtio_sqe_prep_nop(sqe, NULL, NULL);
		rtio_submit(r, 0);
		cqe = rtio_cqe_consume(r);
		rtio_cqe_release(r, cqe);
	}

	end_time = timing_counter_get();

	uint64_t cycles = timing_cycles_get(&start_time, &end_time);
	uint64_t ns = timing_cycles_to_ns(cycles);

	TC_PRINT("%llu ns for %d iterations, %llu ns per op\n",
		 ns, THROUGHPUT_ITERS, ns / THROUGHPUT_ITERS);
}

ZTEST(rtio_api, test_rtio_throughput)
{
	_test_rtio_throughput(&r_throughput);
}

RTIO_DEFINE(r_callback_chaining, SQE_POOL_SIZE, CQE_POOL_SIZE);
RTIO_IODEV_TEST_DEFINE(iodev_test_callback_chaining0);
static bool cb_no_cqe_run;

/**
 * Callback used when testing callback chaining
 */
void rtio_callback_chaining_cb(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
{
	TC_PRINT("chaining callback with userdata %p\n", arg0);
}

void rtio_callback_chaining_cb_no_cqe(struct rtio *r, const struct rtio_sqe *sqe, void *arg0)
{
	TC_PRINT("Chaining callback with userdata %p (No CQE)\n", arg0);
	cb_no_cqe_run = true;
}

/**
 * @brief Test callback chaining requests
 *
 * Ensures that we can set up an RTIO context, enqueue a transaction of
 * requests, receive completion events, and catch a callback at the end,
 * all in the correct order.
 */
void test_rtio_callback_chaining_(struct rtio *r)
{
	int res;
	int32_t userdata[4] = {0, 1, 2, 3};
	int32_t ordering[4] = { -1, -1, -1, -1};
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;
	uintptr_t cq_count = atomic_get(&r->cq_count);

	rtio_iodev_test_init(&iodev_test_callback_chaining0);

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_callback(sqe, &rtio_callback_chaining_cb, sqe, &userdata[0]);
	sqe->flags |= RTIO_SQE_CHAINED;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_callback_chaining0, &userdata[1]);
	sqe->flags |= RTIO_SQE_TRANSACTION;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_nop(sqe, &iodev_test_callback_chaining0, &userdata[2]);
	sqe->flags |= RTIO_SQE_CHAINED;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_callback_no_cqe(sqe, &rtio_callback_chaining_cb_no_cqe, sqe, NULL);
	sqe->flags |= RTIO_SQE_CHAINED;

	sqe = rtio_sqe_acquire(r);
	zassert_not_null(sqe, "Expected a valid sqe");
	rtio_sqe_prep_callback(sqe, &rtio_callback_chaining_cb, sqe, &userdata[3]);

	TC_PRINT("submitting\n");
	res = rtio_submit(r, 4);
	TC_PRINT("checking cq, completions available, count at start %lu, current count %lu\n",
		 cq_count, atomic_get(&r->cq_count));
	zassert_ok(res, "Should return ok from rtio_submit");
	zassert_equal(atomic_get(&r->cq_count) - cq_count, 4, "Should have 4 pending completions");
	zassert_true(cb_no_cqe_run, "Callback without CQE should have run");

	for (int i = 0; i < 4; i++) {
		TC_PRINT("consume %d\n", i);
		cqe = rtio_cqe_consume(r);
		zassert_not_null(cqe, "Expected a valid cqe");
		zassert_ok(cqe->result, "Result should be ok");

		int32_t idx = *(int32_t *)cqe->userdata;

		TC_PRINT("userdata is %p, value %d\n", cqe->userdata, idx);
		ordering[idx] = i;

		rtio_cqe_release(r, cqe);
	}

	for (int i = 0; i < 4; i++) {
		zassert_equal(ordering[i], i,
			      "Expected ordering of completions to match submissions");
	}
}

ZTEST(rtio_api, test_rtio_callback_chaining)
{
	test_rtio_callback_chaining_(&r_callback_chaining);
}

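/*
 * Suite setup: with CONFIG_USERSPACE enabled, build a memory domain
 * containing the RTIO partition (and the libc partition when it exists) so
 * user mode test threads can access the shared test data.
 */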
static void *rtio_api_setup(void)
{
#ifdef CONFIG_USERSPACE
	k_mem_domain_init(&rtio_domain, 0, NULL);
	k_mem_domain_add_partition(&rtio_domain, &rtio_partition);
#if Z_LIBC_PARTITION_EXISTS
	k_mem_domain_add_partition(&rtio_domain, &z_libc_partition);
#endif /* Z_LIBC_PARTITION_EXISTS */
#endif /* CONFIG_USERSPACE */

	return NULL;
}

static void rtio_api_before(void *a)
{
	ARG_UNUSED(a);

	/* Drain any completions left behind by a previous test */
	STRUCT_SECTION_FOREACH(rtio, r) {
		struct rtio_cqe cqe;

		while (rtio_cqe_copy_out(r, &cqe, 1, K_MSEC(15))) {
		}
	}

	rtio_iodev_test_init(&iodev_test_simple);
	rtio_iodev_test_init(&iodev_test_syscall);
#ifdef CONFIG_USERSPACE
	k_mem_domain_add_thread(&rtio_domain, k_current_get());
	rtio_access_grant(&r_simple, k_current_get());
	rtio_access_grant(&r_syscall, k_current_get());
	k_object_access_grant(&iodev_test_simple, k_current_get());
	k_object_access_grant(&iodev_test_syscall, k_current_get());
#endif
}

ZTEST_SUITE(rtio_api, NULL, rtio_api_setup, rtio_api_before, NULL, NULL);