/*
 * Copyright (c) 2021 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/irq_offload.h>
#include <zephyr/interrupt_util.h>
#if defined(CONFIG_ARCH_POSIX)
#include <soc.h>
#endif

#define STACK_SIZE	1024
#define NUM_WORK	4

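/* work items, the work queue, and the background thread under test */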
static struct k_work offload_work[NUM_WORK];
static struct k_work_q wq_queue;
static K_THREAD_STACK_DEFINE(wq_stack, STACK_SIZE);
static K_THREAD_STACK_DEFINE(tstack, STACK_SIZE);
static struct k_thread tdata;

static struct k_sem sync_sem;
static struct k_sem end_sem;
static bool wait_for_end;
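/* counters: successful ISR submissions vs. offload jobs actually executed */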
static atomic_t submit_success;
static atomic_t offload_job_cnt;

/*
 * This global variable controls whether the priority of the offload
 * job is higher than that of the original thread.
 */
static bool offload_job_prio_higher;

static volatile int orig_t_keep_run;

/* record the initialized interrupt vector for reuse */
static int vector_num;

enum {
	TEST_OFFLOAD_MULTI_JOBS,
	TEST_OFFLOAD_IDENTICAL_JOBS
};

struct interrupt_param {
	struct k_work *work;
};

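/* parameter block handed to the ISR; it carries the work item to submit */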
static struct interrupt_param irq_param;

/* work handler for the offload job */
static void entry_offload_job(struct k_work *work)
{
	if (offload_job_prio_higher) {
		/*TESTPOINT: offload thread runs right after irq ends.*/
		zassert_equal(orig_t_keep_run, 0,
			"the offload job did not run immediately.");
	} else {
		/*TESTPOINT: original thread runs right after irq ends.*/
		zassert_equal(orig_t_keep_run, 1,
			"the original thread did not keep running.");
	}

	atomic_inc(&offload_job_cnt);
	k_sem_give(&end_sem);
}

/* ISR: offload work to the work queue */
void isr_handler(const void *param)
{
	struct k_work *work = ((struct interrupt_param *)param)->work;

	zassert_not_null(work, "kwork should not be NULL");

	orig_t_keep_run = 0;

	/* If the work is still busy, we don't submit it again. */
	if (!k_work_busy_get(work)) {
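		/* a return value of 1 means the work item was newly queued */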
		zassert_equal(k_work_submit_to_queue(&wq_queue, work),
				1, "kwork not submitted or queued");

		atomic_inc(&submit_success);
	}
}

#if defined(CONFIG_DYNAMIC_INTERRUPTS)
/*
 * So far, we only test x86 and the POSIX arch with a real dynamic
 * interrupt. Other architectures will be added later.
 */
#if defined(CONFIG_X86)
#define TEST_IRQ_DYN_LINE 26

#elif defined(CONFIG_ARCH_POSIX)
#if defined(OFFLOAD_SW_IRQ)
#define TEST_IRQ_DYN_LINE OFFLOAD_SW_IRQ
#else
#define TEST_IRQ_DYN_LINE 0
#endif

#else
#define TEST_IRQ_DYN_LINE 0
#endif

#endif

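/* Connect and enable the dynamic interrupt, or skip the test if no line is available. */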
static void init_dyn_interrupt(void)
{
	/* If we cannot get a dynamic interrupt line, skip the test. */
	if (TEST_IRQ_DYN_LINE == 0) {
		ztest_test_skip();
	}

	/* We only initialize the dynamic interrupt once, then reuse it. */
	if (!vector_num) {
		vector_num = irq_connect_dynamic(TEST_IRQ_DYN_LINE, 1,
					isr_handler, (void *)&irq_param, 0);
	}

	TC_PRINT("vector(%d)\n", vector_num);
	zassert_true(vector_num > 0, "no vector can be used");
	irq_enable(TEST_IRQ_DYN_LINE);
}

static void trigger_offload_interrupt(const bool real_irq, void *work)
{
	irq_param.work = work;

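	/* Either fire the real dynamic IRQ or emulate one via irq_offload(). */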
	if (real_irq) {
		trigger_irq(vector_num);
	} else {
		irq_offload((irq_offload_routine_t)&isr_handler, &irq_param);
	}
}

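/* Background thread: keeps flagging itself as running until told to end. */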
static void t_running(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	k_sem_give(&sync_sem);

	while (wait_for_end == false) {
		orig_t_keep_run = 1;
		k_usleep(150);
	}
}

static void init_env(int real_irq)
{
	static bool wq_already_start;

	/* semaphores used to sync the thread start and the end */
	k_sem_init(&sync_sem, 0, 1);
	k_sem_init(&end_sem, 0, NUM_WORK);

	/* initialize global variables */
	submit_success = 0;
	offload_job_cnt = 0;
	orig_t_keep_run = 0;
	wait_for_end = false;

	/* initialize the dynamic interrupt when using a real IRQ */
	if (real_irq && !vector_num) {
		init_dyn_interrupt();
	}

	/* initialize all the k_work items */
	for (int i = 0; i < NUM_WORK; i++) {
		k_work_init(&offload_work[i], entry_offload_job);
	}

	/* start the work queue thread if it is not already running */
	if (!wq_already_start) {
		k_work_queue_start(&wq_queue, wq_stack, STACK_SIZE,
				   K_PRIO_PREEMPT(1), NULL);

		wq_already_start = true;
	}
}

static void run_test_offload(int case_type, int real_irq)
{
	int thread_prio = K_PRIO_PREEMPT(0);

	/* initialize the test environment and global variables */
	init_env(real_irq);

	/* Lower the thread priority below the work queue's (which runs at
	 * K_PRIO_PREEMPT(1)) so the offload jobs preempt the thread.
	 */
	if (offload_job_prio_higher) {
		thread_prio = K_PRIO_PREEMPT(2);
	}

	k_tid_t tid = k_thread_create(&tdata, tstack, STACK_SIZE,
			t_running,
			NULL, NULL, NULL, thread_prio,
			K_INHERIT_PERMS, K_NO_WAIT);

	/* wait for the thread to start */
	k_sem_take(&sync_sem, K_FOREVER);

	for (int i = 0; i < NUM_WORK; i++) {

		switch (case_type) {
		case TEST_OFFLOAD_MULTI_JOBS:
			trigger_offload_interrupt(real_irq,
					(void *)&offload_work[i]);
			break;
		case TEST_OFFLOAD_IDENTICAL_JOBS:
			trigger_offload_interrupt(real_irq,
					(void *)&offload_work[0]);
			break;
		default:
			ztest_test_fail();
		}
	}

	/* Wait for all successfully submitted offload jobs to complete; in
	 * the identical-jobs case fewer than NUM_WORK may have been submitted.
	 */
	for (int i = 0; i < atomic_get(&submit_success); i++) {
		k_sem_take(&end_sem, K_FOREVER);
	}

	zassert_equal(submit_success, offload_job_cnt,
			"submitted jobs do not match completed offload jobs");

	/* notify the running thread to end */
	wait_for_end = true;

	k_thread_join(tid, K_FOREVER);
}

/**
 * @brief Test interrupt offload work to multiple jobs
 *
 * @ingroup kernel_interrupt_tests
 *
 * @details Validate that an ISR can offload multiple jobs to a work
 * queue, and:
 *
 * - If the priority of the original thread is lower than that of the
 *   offload jobs, the offload jobs execute immediately.
 *
 * - If the priority of the original thread is higher than or equal to
 *   that of the offload jobs, the offload jobs do not execute
 *   immediately.
 *
 * We test this via irq_offload().
 */
ZTEST(interrupt_feature, test_isr_offload_job_multiple)
{
	offload_job_prio_higher = false;
	run_test_offload(TEST_OFFLOAD_MULTI_JOBS, false);

	offload_job_prio_higher = true;
	run_test_offload(TEST_OFFLOAD_MULTI_JOBS, false);
}

/**
 * @brief Test interrupt offload work to identical jobs
 *
 * @ingroup kernel_interrupt_tests
 *
 * @details Validate that an ISR can offload workloads to a work queue
 * where all the offload jobs share the same handler, and:
 *
 * - If the priority of the original thread is lower than that of the
 *   offload jobs, the offload jobs execute immediately.
 *
 * - If the priority of the original thread is higher than or equal to
 *   that of the offload jobs, the offload jobs do not execute
 *   immediately.
 *
 * We test this via irq_offload().
 */
ZTEST(interrupt_feature, test_isr_offload_job_identi)
{
	offload_job_prio_higher = false;
	run_test_offload(TEST_OFFLOAD_IDENTICAL_JOBS, false);

	offload_job_prio_higher = true;
	run_test_offload(TEST_OFFLOAD_IDENTICAL_JOBS, false);
}

/**
 * @brief Test interrupt offload work by dynamic interrupt
 *
 * @ingroup kernel_interrupt_tests
 *
 * @details Validate that an ISR can offload workloads to a work queue,
 * and that the offload jobs execute immediately based on their
 * priority. We test this with a real dynamic interrupt.
 */
ZTEST(interrupt_feature, test_isr_offload_job)
{
	if (!IS_ENABLED(CONFIG_DYNAMIC_INTERRUPTS)) {
		ztest_test_skip();
	}

	offload_job_prio_higher = true;
	run_test_offload(TEST_OFFLOAD_MULTI_JOBS, true);
	run_test_offload(TEST_OFFLOAD_IDENTICAL_JOBS, true);
}