/*
 * Copyright (c) 2024 Croxel Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/kernel.h>
#include <zephyr/rtio/rtio.h>
#include <zephyr/rtio/work.h>

/** Used to validate/control test execution flow */
K_SEM_DEFINE(work_handler_sem_1, 0, 1);
K_SEM_DEFINE(work_handler_sem_2, 0, 1);
K_SEM_DEFINE(work_handler_sem_3, 0, 1);
static int work_handler_called;

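/*
 * Work handler shared by all tests: it blocks on the semaphore passed via
 * the SQE userdata, which lets each test hold a request "in flight" and
 * inspect the work pool state before the SQE is completed.
 */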
static void work_handler(struct rtio_iodev_sqe *iodev_sqe)
{
	struct rtio_sqe *sqe = &iodev_sqe->sqe;
	struct k_sem *sem = (struct k_sem *)sqe->userdata;

	work_handler_called++;
	printk("\t- %s() called!: %d\n", __func__, work_handler_called);

	k_sem_take(sem, K_FOREVER);

	rtio_executor_ok(iodev_sqe, 0);
}

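/*
 * Dummy iodev submit hook: defers every submission to the RTIO work queue
 * through a pooled work request, so the SQE is serviced outside the
 * submitter's context.
 */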
static void dummy_submit(struct rtio_iodev_sqe *iodev_sqe)
{
	struct rtio_work_req *req = rtio_work_req_alloc();

	rtio_work_req_submit(req, iodev_sqe, work_handler);
}

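/*
 * Three independent iodevs and RTIO contexts let the tests exercise
 * concurrent submissions and differing priorities.
 */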
struct rtio_iodev_api r_iodev_test_api = {
	.submit = dummy_submit,
};

RTIO_IODEV_DEFINE(dummy_iodev, &r_iodev_test_api, NULL);
RTIO_IODEV_DEFINE(dummy_iodev_2, &r_iodev_test_api, NULL);
RTIO_IODEV_DEFINE(dummy_iodev_3, &r_iodev_test_api, NULL);

RTIO_DEFINE(r_test, 3, 3);
RTIO_DEFINE(r_test_2, 3, 3);
RTIO_DEFINE(r_test_3, 3, 3);

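/* Reset contexts, semaphores, and the call counter before each test. */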
static void before(void *unused)
{
	ARG_UNUSED(unused);

	rtio_sqe_drop_all(&r_test);
	rtio_sqe_drop_all(&r_test_2);
	rtio_sqe_drop_all(&r_test_3);

	k_sem_init(&work_handler_sem_1, 0, 1);
	k_sem_init(&work_handler_sem_2, 0, 1);
	k_sem_init(&work_handler_sem_3, 0, 1);

	work_handler_called = 0;
}

static void after(void *unused)
{
	ARG_UNUSED(unused);
}

ZTEST_SUITE(rtio_work, NULL, NULL, before, after, NULL);

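/*
 * Verify that submission is decoupled from execution: rtio_submit() returns
 * while the handler is still blocked, holding one work pool item until the
 * semaphore is given.
 */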
ZTEST(rtio_work, test_work_decouples_submission)
{
	struct rtio_sqe *sqe;
	struct rtio_cqe *cqe;

	sqe = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe, &dummy_iodev, &work_handler_sem_1);
	sqe->prio = RTIO_PRIO_NORM;

	zassert_equal(0, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	zassert_ok(rtio_submit(&r_test, 0));

	zassert_equal(1, work_handler_called);
	zassert_equal(1, rtio_work_req_used_count_get());

	k_sem_give(&work_handler_sem_1);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
}

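/*
 * Verify that multiple requests queued on one context can be batched: a
 * single submit call dispatches all three handlers, and every work pool
 * item is released once its handler completes.
 */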
ZTEST(rtio_work, test_work_supports_batching_submissions)
{
	struct rtio_sqe *sqe_a;
	struct rtio_sqe *sqe_b;
	struct rtio_sqe *sqe_c;
	struct rtio_cqe *cqe;

	sqe_a = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_a, &dummy_iodev, &work_handler_sem_1);
	sqe_a->prio = RTIO_PRIO_NORM;

	sqe_b = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_b, &dummy_iodev, &work_handler_sem_2);
	sqe_b->prio = RTIO_PRIO_NORM;

	sqe_c = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_c, &dummy_iodev, &work_handler_sem_3);
	sqe_c->prio = RTIO_PRIO_NORM;

	zassert_ok(rtio_submit(&r_test, 0));

	k_sem_give(&work_handler_sem_1);
	k_sem_give(&work_handler_sem_2);
	k_sem_give(&work_handler_sem_3);

	zassert_equal(3, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
}

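/*
 * Verify that same-priority requests on separate contexts are worked in
 * parallel: both handlers end up blocked at the same time, each holding its
 * own pool item.
 */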
ZTEST(rtio_work, test_work_supports_working_same_prio_items_on_separate_threads)
{
	struct rtio_sqe *sqe_a;
	struct rtio_sqe *sqe_b;
	struct rtio_cqe *cqe;

	sqe_a = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_a, &dummy_iodev, &work_handler_sem_1);
	sqe_a->prio = RTIO_PRIO_NORM;

	sqe_b = rtio_sqe_acquire(&r_test_2);
	rtio_sqe_prep_nop(sqe_b, &dummy_iodev_2, &work_handler_sem_2);
	sqe_b->prio = RTIO_PRIO_NORM;

	zassert_ok(rtio_submit(&r_test, 0));
	zassert_ok(rtio_submit(&r_test_2, 0));

	zassert_equal(2, work_handler_called);
	zassert_equal(2, rtio_work_req_used_count_get());

	k_sem_give(&work_handler_sem_1);
	k_sem_give(&work_handler_sem_2);

	zassert_equal(2, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test_2);
	rtio_cqe_release(&r_test_2, cqe);
}

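/*
 * Verify that higher-priority submissions are serviced even while
 * lower-priority work is blocked: with LOW, NORM, and HIGH requests
 * outstanding on separate contexts, all three handlers run and hold their
 * pool items until each semaphore is given.
 */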
ZTEST(rtio_work, test_work_supports_preempting_on_higher_prio_submissions)
{
	struct rtio_sqe *sqe_a;
	struct rtio_sqe *sqe_b;
	struct rtio_sqe *sqe_c;
	struct rtio_cqe *cqe;

	sqe_a = rtio_sqe_acquire(&r_test);
	rtio_sqe_prep_nop(sqe_a, &dummy_iodev, &work_handler_sem_1);
	sqe_a->prio = RTIO_PRIO_LOW;

	sqe_b = rtio_sqe_acquire(&r_test_2);
	rtio_sqe_prep_nop(sqe_b, &dummy_iodev_2, &work_handler_sem_2);
	sqe_b->prio = RTIO_PRIO_NORM;

	sqe_c = rtio_sqe_acquire(&r_test_3);
	rtio_sqe_prep_nop(sqe_c, &dummy_iodev_3, &work_handler_sem_3);
	sqe_c->prio = RTIO_PRIO_HIGH;

	zassert_ok(rtio_submit(&r_test, 0));
	zassert_ok(rtio_submit(&r_test_2, 0));
	zassert_ok(rtio_submit(&r_test_3, 0));

	zassert_equal(3, work_handler_called);
	zassert_equal(3, rtio_work_req_used_count_get());

	k_sem_give(&work_handler_sem_1);
	k_sem_give(&work_handler_sem_2);
	k_sem_give(&work_handler_sem_3);

	zassert_equal(3, work_handler_called);
	zassert_equal(0, rtio_work_req_used_count_get());

	/** Clean-up */
	cqe = rtio_cqe_consume_block(&r_test);
	rtio_cqe_release(&r_test, cqe);
	cqe = rtio_cqe_consume_block(&r_test_2);
	rtio_cqe_release(&r_test_2, cqe);
	cqe = rtio_cqe_consume_block(&r_test_3);
	rtio_cqe_release(&r_test_3, cqe);
}

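/*
 * Verify that rtio_work_req_used_count_get() tracks allocations: the pool
 * is exhausted after four allocations (per this test's configuration), a
 * fifth attempt fails, and submitting each request releases it back to the
 * pool.
 */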
ZTEST(rtio_work, test_used_count_keeps_track_of_alloc_items)
{
	struct rtio_work_req *req_a = NULL;
	struct rtio_work_req *req_b = NULL;
	struct rtio_work_req *req_c = NULL;
	struct rtio_work_req *req_d = NULL;
	struct rtio_work_req *req_e = NULL;

	zassert_equal(0, rtio_work_req_used_count_get());

	/** Each allocation should succeed and be reflected in the used count */
	req_a = rtio_work_req_alloc();
	zassert_not_null(req_a);
	zassert_equal(1, rtio_work_req_used_count_get());

	req_b = rtio_work_req_alloc();
	zassert_not_null(req_b);
	zassert_equal(2, rtio_work_req_used_count_get());

	req_c = rtio_work_req_alloc();
	zassert_not_null(req_c);
	zassert_equal(3, rtio_work_req_used_count_get());

	req_d = rtio_work_req_alloc();
	zassert_not_null(req_d);
	zassert_equal(4, rtio_work_req_used_count_get());

	/** The pool is now exhausted, so this allocation should fail */
	req_e = rtio_work_req_alloc();
	zassert_is_null(req_e);
	zassert_equal(4, rtio_work_req_used_count_get());

	/** Flush requests to release them back to the pool */
	rtio_work_req_submit(req_a, NULL, NULL);
	zassert_equal(3, rtio_work_req_used_count_get());

	rtio_work_req_submit(req_b, NULL, NULL);
	zassert_equal(2, rtio_work_req_used_count_get());

	rtio_work_req_submit(req_c, NULL, NULL);
	zassert_equal(1, rtio_work_req_used_count_get());

	rtio_work_req_submit(req_d, NULL, NULL);
	zassert_equal(0, rtio_work_req_used_count_get());
}