1 /*
2 * Copyright (c) 2021 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/ztest.h>
8 #include <zephyr/irq_offload.h>
9 #include <zephyr/interrupt_util.h>
10 #if defined(CONFIG_ARCH_POSIX)
11 #include <soc.h>
12 #endif
13
14 #define STACK_SIZE 1024 + CONFIG_TEST_EXTRA_STACK_SIZE
15 #define NUM_WORK 4
16
17 static struct k_work offload_work[NUM_WORK];
18 static struct k_work_q wq_queue;
19 static K_THREAD_STACK_DEFINE(wq_stack, STACK_SIZE);
20 static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
21 static struct k_thread tdata;
22
23 static struct k_sem sync_sem;
24 static struct k_sem end_sem;
25 static bool wait_for_end;
26 static atomic_t submit_success;
27 static atomic_t offload_job_cnt;
28
29 /*
30 * This global variable control if the priority of offload job
31 * is greater than the original thread.
32 */
33 static bool offload_job_prio_higher;
34
35 static volatile int orig_t_keep_run;
36
37 /* record the initialized interrupt vector for reuse */
38 static int vector_num;
39
40 enum {
41 TEST_OFFLOAD_MULTI_JOBS,
42 TEST_OFFLOAD_IDENTICAL_JOBS
43 };
44
45 struct interrupt_param {
46 struct k_work *work;
47 };
48
49 static struct interrupt_param irq_param;
50
51 /* thread entry of doing offload job */
entry_offload_job(struct k_work * work)52 static void entry_offload_job(struct k_work *work)
53 {
54 if (offload_job_prio_higher) {
55 /*TESTPOINT: offload thread run right after irq end.*/
56 zassert_equal(orig_t_keep_run, 0,
57 "the offload did not run immediately.");
58 } else {
59 /*TESTPOINT: original thread run right after irq end.*/
60 zassert_equal(orig_t_keep_run, 1,
61 "the offload did not run immediately.");
62 }
63
64 atomic_inc(&offload_job_cnt);
65 k_sem_give(&end_sem);
66 }
67
68 /* offload work to work queue */
isr_handler(const void * param)69 void isr_handler(const void *param)
70 {
71 struct k_work *work = ((struct interrupt_param *)param)->work;
72
73 zassert_not_null(work, "kwork should not be NULL");
74
75 orig_t_keep_run = 0;
76
77 /* If the work is busy, we don't submit it. */
78 if (!k_work_busy_get(work)) {
79 zassert_equal(k_work_submit_to_queue(&wq_queue, work),
80 1, "kwork not submitted or queued");
81
82 atomic_inc(&submit_success);
83 }
84 }
85
#if defined(CONFIG_DYNAMIC_INTERRUPTS)
/*
 * So far, we only test x86 and arch posix with a real dynamic interrupt.
 * Other arches will be added later.
 *
 * TEST_IRQ_DYN_LINE == 0 means "no usable line"; init_dyn_interrupt()
 * skips the test in that case.
 */
#if defined(CONFIG_X86)
#define TEST_IRQ_DYN_LINE 26

#elif defined(CONFIG_ARCH_POSIX)
#if defined(OFFLOAD_SW_IRQ)
#define TEST_IRQ_DYN_LINE OFFLOAD_SW_IRQ
#else
#define TEST_IRQ_DYN_LINE 0
#endif

#else
/* Unsupported arch: no dynamic interrupt line available. */
#define TEST_IRQ_DYN_LINE 0
#endif

#else
/* Dynamic interrupts disabled: real-irq tests will be skipped. */
#define TEST_IRQ_DYN_LINE 0
#endif
108
init_dyn_interrupt(void)109 static void init_dyn_interrupt(void)
110 {
111 /* If we cannot get a dynamic interrupt, skip test. */
112 if (TEST_IRQ_DYN_LINE == 0) {
113 ztest_test_skip();
114 }
115
116 #if defined(CONFIG_DYNAMIC_INTERRUPTS)
117 /* We just initialize dynamic interrupt once, then reuse them */
118 if (!vector_num) {
119 vector_num = irq_connect_dynamic(TEST_IRQ_DYN_LINE, 1,
120 isr_handler, (void *)&irq_param, 0);
121 }
122 #endif
123
124 TC_PRINT("vector(%d)\n", vector_num);
125 zassert_true(vector_num > 0, "no vector can be used");
126 irq_enable(TEST_IRQ_DYN_LINE);
127 }
128
trigger_offload_interrupt(const bool real_irq,void * work)129 static void trigger_offload_interrupt(const bool real_irq, void *work)
130 {
131 irq_param.work = work;
132
133 if (real_irq) {
134 trigger_irq(vector_num);
135 } else {
136 irq_offload((irq_offload_routine_t)&isr_handler, &irq_param);
137 }
138 }
139
t_running(void * p1,void * p2,void * p3)140 static void t_running(void *p1, void *p2, void *p3)
141 {
142 ARG_UNUSED(p1);
143 ARG_UNUSED(p2);
144 ARG_UNUSED(p3);
145
146 k_sem_give(&sync_sem);
147
148 while (wait_for_end == false) {
149 orig_t_keep_run = 1;
150 k_usleep(150);
151 }
152 }
153
init_env(int real_irq)154 static void init_env(int real_irq)
155 {
156 static bool wq_already_start;
157
158 /* semaphore used to sync the end */
159 k_sem_init(&sync_sem, 0, 1);
160 k_sem_init(&end_sem, 0, NUM_WORK);
161
162 /* initialize global variables */
163 submit_success = 0;
164 offload_job_cnt = 0;
165 orig_t_keep_run = 0;
166 wait_for_end = false;
167
168 /* initialize the dynamic interrupt while using it */
169 if (real_irq && !vector_num) {
170 init_dyn_interrupt();
171 }
172
173 /* initialize all the k_work */
174 for (int i = 0; i < NUM_WORK; i++) {
175 k_work_init(&offload_work[i], entry_offload_job);
176 }
177
178 /* start a work queue thread if not existing */
179 if (!wq_already_start) {
180 k_work_queue_start(&wq_queue, wq_stack, STACK_SIZE,
181 K_PRIO_PREEMPT(1), NULL);
182
183 wq_already_start = true;
184 }
185 }
186
/*
 * Core scenario driver: spawn a helper thread, fire NUM_WORK interrupts
 * that each try to submit a work item, then verify that every
 * successfully submitted job actually ran.
 *
 * @param case_type TEST_OFFLOAD_MULTI_JOBS submits a distinct work item
 *                  per interrupt; TEST_OFFLOAD_IDENTICAL_JOBS submits
 *                  the same item every time (later submissions may find
 *                  it busy and be skipped by the ISR).
 * @param real_irq  non-zero to trigger a real dynamic interrupt,
 *                  zero to use irq_offload().
 */
static void run_test_offload(int case_type, int real_irq)
{
	int thread_prio = K_PRIO_PREEMPT(0);

	/* initialize the global variables */
	init_env(real_irq);

	/* set priority of offload job higher than thread */
	if (offload_job_prio_higher) {
		thread_prio = K_PRIO_PREEMPT(2);
	}

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
			t_running,
			NULL, NULL, NULL, thread_prio,
			K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for thread start */
	k_sem_take(&sync_sem, K_FOREVER);

	for (int i = 0; i < NUM_WORK; i++) {

		switch (case_type) {
		case TEST_OFFLOAD_MULTI_JOBS:
			trigger_offload_interrupt(real_irq,
					(void *)&offload_work[i]);
			break;
		case TEST_OFFLOAD_IDENTICAL_JOBS:
			trigger_offload_interrupt(real_irq,
					(void *)&offload_work[0]);
			break;
		default:
			ztest_test_fail();
		}
	}
	/* wait for all offload job complete: end_sem is given once per
	 * executed job, and only submit_success jobs were ever queued
	 */
	for (int i = 0; i < atomic_get(&submit_success); i++) {
		k_sem_take(&end_sem, K_FOREVER);
	}

	zassert_equal(submit_success, offload_job_cnt,
			"submitted job unmatch offload");

	/* notify the running thread to end */
	wait_for_end = true;

	k_thread_join(tid, K_FOREVER);
}
235
236 /**
237 * @brief Test interrupt offload work to multiple jobs
238 *
239 * @ingroup kernel_interrupt_tests
240 *
241 * @details Validate isr can offload workload to multi work queue, and:
242 *
243 * - If the priority of the original thread < offload job, offload jobs
244 * could execute immediately.
245 *
246 * - If the priority of the original thread >= offload job, offload
247 * jobs will not execute immediately.
248 *
249 * We test this by irq_offload().
250 */
ZTEST(interrupt_feature,test_isr_offload_job_multiple)251 ZTEST(interrupt_feature, test_isr_offload_job_multiple)
252 {
253 offload_job_prio_higher = false;
254 run_test_offload(TEST_OFFLOAD_MULTI_JOBS, false);
255
256 offload_job_prio_higher = true;
257 run_test_offload(TEST_OFFLOAD_MULTI_JOBS, false);
258 }
259
260 /**
261 * @brief Test interrupt offload work to identical jobs
262 *
263 * @ingroup kernel_interrupt_tests
264 *
265 * @details Validate isr can offload workload to work queue, and all
266 * the offload jobs use the same thread entry, and:
267 *
268 * - If the priority of the original thread < offload job, offload jobs
269 * could execute immediately.
270 *
271 * - If the priority of the original thread >= offload job, offload
272 * jobs will not execute immediately.
273 *
274 * We test this by irq_offload().
275 */
ZTEST(interrupt_feature,test_isr_offload_job_identi)276 ZTEST(interrupt_feature, test_isr_offload_job_identi)
277 {
278 offload_job_prio_higher = false;
279 run_test_offload(TEST_OFFLOAD_IDENTICAL_JOBS, false);
280
281 offload_job_prio_higher = true;
282 run_test_offload(TEST_OFFLOAD_IDENTICAL_JOBS, false);
283 }
284
285 /**
286 * @brief Test interrupt offload work by dynamic interrupt
287 *
288 * @ingroup kernel_interrupt_tests
289 *
 * @details Validate that the ISR can offload work to a work queue, and
 * that the offloaded jobs execute immediately based on their priority.
 * We test this with a real dynamic interrupt.
293 */
ZTEST(interrupt_feature, test_isr_offload_job)
{
	/* Real dynamic interrupts are required for this variant. */
	if (!IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS)) {
		ztest_test_skip();
	}

	/* Only the offload-preempts-thread case is exercised here. */
	offload_job_prio_higher = true;
	run_test_offload(TEST_OFFLOAD_MULTI_JOBS, true);
	run_test_offload(TEST_OFFLOAD_IDENTICAL_JOBS, true);
}
304