/*
 * Copyright (c) 2017 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Offload to the kernel workqueue
 *
 * This test verifies that the kernel workqueue operates as
 * expected.
 *
 * This test has two threads that increment a counter. The routine that
 * increments the counter is invoked from the workqueue because both
 * threads submit work items to it. The final value of the counter is
 * expected to equal the number of times the work item was submitted to
 * increment the counter.
 *
 * This is done with time slicing both disabled and enabled to ensure that
 * the result always matches the number of times the work item is submitted.
 *
 * @{
 * @}
 */
#include <zephyr/kernel.h>
#include <zephyr/linker/sections.h>
#include <zephyr/ztest.h>

#define NUM_MILLISECONDS 50
#define TEST_TIMEOUT 200
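/*
 * Each critical_loop() invocation submits work for NUM_MILLISECONDS;
 * TEST_TIMEOUT bounds the semaphore waits. Both values are in milliseconds.
 */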

#ifdef CONFIG_COVERAGE_GCOV
#define OFFLOAD_WORKQUEUE_STACK_SIZE 4096
#else
#define OFFLOAD_WORKQUEUE_STACK_SIZE 1024
#endif


static uint32_t critical_var;
static uint32_t alt_thread_iterations;

static struct k_work_q offload_work_q;
static K_THREAD_STACK_DEFINE(offload_work_q_stack,
			     OFFLOAD_WORKQUEUE_STACK_SIZE);

#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)

static K_THREAD_STACK_DEFINE(stack1, STACK_SIZE);
static K_THREAD_STACK_DEFINE(stack2, STACK_SIZE);

static struct k_thread thread1;
static struct k_thread thread2;

K_SEM_DEFINE(ALT_SEM, 0, UINT_MAX);
K_SEM_DEFINE(REGRESS_SEM, 0, UINT_MAX);
K_SEM_DEFINE(TEST_SEM, 0, UINT_MAX);
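/*
 * Handshake between the two test threads: regression_thread gives ALT_SEM
 * to start each alternate_thread pass, alternate_thread gives REGRESS_SEM
 * when its pass completes, and TEST_SEM tells the test body that both
 * passes have been checked.
 */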

/**
 * @brief Routine to be called from a workqueue
 *
 * This routine increments the global variable @a critical_var.
 */
void critical_rtn(struct k_work *unused)
{
	volatile uint32_t x;

	ARG_UNUSED(unused);

	x = critical_var;
	critical_var = x + 1;
}

/**
 * @brief Common code for invoking work
 *
 * @param tag text identifying the invocation context
 * @param count number of critical section calls made thus far
 *
 * @return number of critical section calls made by a thread
 */
uint32_t critical_loop(const char *tag, uint32_t count)
{
	int64_t now;
	int64_t last;
	int64_t mseconds;

	last = mseconds = k_uptime_get();
	TC_PRINT("Start %s at %u\n", tag, (uint32_t)last);
	while (((now = k_uptime_get())) < mseconds + NUM_MILLISECONDS) {
		struct k_work work_item;

		if (now < last) {
			TC_PRINT("Time went backwards: %u < %u\n",
				 (uint32_t)now, (uint32_t)last);
		}
		last = now;
		k_work_init(&work_item, critical_rtn);
		k_work_submit_to_queue(&offload_work_q, &work_item);
		count++;
		Z_SPIN_DELAY(50); /* Short spin between submissions */
	}
	TC_PRINT("End %s at %u\n", tag, (uint32_t)now);

	return count;
}

/**
 * @brief Alternate thread
 *
 * This routine invokes the workqueue many times.
 */
void alternate_thread(void *arg1, void *arg2, void *arg3)
{
	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	k_sem_take(&ALT_SEM, K_FOREVER);	/* Wait to be activated */

	alt_thread_iterations = critical_loop("alt1", alt_thread_iterations);

	k_sem_give(&REGRESS_SEM);

	k_sem_take(&ALT_SEM, K_FOREVER);	/* Wait to be re-activated */

	alt_thread_iterations = critical_loop("alt2", alt_thread_iterations);

	k_sem_give(&REGRESS_SEM);
}

/**
 * @brief Regression thread
 *
 * This routine invokes the workqueue many times. It also checks that
 * the total number of work item submissions matches the global variable
 * @a critical_var.
 */

void regression_thread(void *arg1, void *arg2, void *arg3)
{
	uint32_t ncalls = 0U;

	ARG_UNUSED(arg1);
	ARG_UNUSED(arg2);
	ARG_UNUSED(arg3);

	k_sem_give(&ALT_SEM);	/* Activate alternate_thread() */

	ncalls = critical_loop("reg1", ncalls);

	/* Wait for alternate_thread() to complete */
	zassert_true(k_sem_take(&REGRESS_SEM, K_MSEC(TEST_TIMEOUT)) == 0,
		     "Timed out waiting for REGRESS_SEM");

	zassert_equal(critical_var, ncalls + alt_thread_iterations,
		      "Unexpected value for <critical_var>");

	TC_PRINT("Enable timeslicing at %u\n", k_uptime_get_32());
	k_sched_time_slice_set(20, 10);

	k_sem_give(&ALT_SEM);	/* Re-activate alternate_thread() */

	ncalls = critical_loop("reg2", ncalls);

	/* Wait for alternate_thread() to finish */
	zassert_true(k_sem_take(&REGRESS_SEM, K_MSEC(TEST_TIMEOUT)) == 0,
		     "Timed out waiting for REGRESS_SEM");

	zassert_equal(critical_var, ncalls + alt_thread_iterations,
		      "Unexpected value for <critical_var>");

	k_sem_give(&TEST_SEM);
}

/**
 * @brief Verify offloading work to the kernel workqueue
 *
 * @details Submit work items to the offload workqueue from two threads
 * and check that the counter incremented by the work handler matches
 * the total number of submissions, with time slicing both disabled
 * and enabled.
 *
 * @ingroup kernel_workqueue_tests
 */
ZTEST(kernel_offload_wq, test_offload_workqueue)
{
	critical_var = 0U;
	alt_thread_iterations = 0U;

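	/*
	 * Start the offload queue at CONFIG_MAIN_THREAD_PRIORITY, which in
	 * this test's default configuration is a higher priority than the
	 * two test threads created below, so submitted items run promptly.
	 */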
	k_work_queue_start(&offload_work_q,
			   offload_work_q_stack,
			   K_THREAD_STACK_SIZEOF(offload_work_q_stack),
			   CONFIG_MAIN_THREAD_PRIORITY, NULL);

	k_thread_create(&thread1, stack1, STACK_SIZE,
			alternate_thread, NULL, NULL, NULL,
			K_PRIO_PREEMPT(12), 0, K_NO_WAIT);

	k_thread_create(&thread2, stack2, STACK_SIZE,
			regression_thread, NULL, NULL, NULL,
			K_PRIO_PREEMPT(12), 0, K_NO_WAIT);

	zassert_true(k_sem_take(&TEST_SEM, K_MSEC(TEST_TIMEOUT * 2)) == 0,
		     "Timed out waiting for TEST_SEM");
}

ZTEST_SUITE(kernel_offload_wq, NULL, NULL, ztest_simple_1cpu_before,
	    ztest_simple_1cpu_after, NULL);