/*
 * Copyright (c) 2015 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/sys/atomic.h>

/* convenience macro - return the 64-bit value on targets with 64-bit (pointer-sized)
 * atomics, otherwise the 32-bit value
 */
#define ATOMIC_WORD(val_if_64, val_if_32)                                                          \
	((atomic_t)((sizeof(void *) == sizeof(uint64_t)) ? (val_if_64) : (val_if_32)))

/* number of bits in the example atomic flag array (wider than a single atomic word) */
#define NUM_FLAG_BITS 100

/* test cycle per thread: 20 iterations of a 1000us busy-wait = 20ms */
#define TEST_CYCLE 20

#define THREADS_NUM 2

#define STACK_SIZE (512 + CONFIG_TEST_EXTRA_STACK_SIZE)

static K_THREAD_STACK_ARRAY_DEFINE(stack, THREADS_NUM, STACK_SIZE);

static struct k_thread thread[THREADS_NUM];

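/* counter shared by the threads spawned in test_threads_access_atomic() */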
atomic_t total_atomic;

/**
 * @addtogroup kernel_common_tests
 * @{
 */

/**
 * @brief Verify atomic functionalities
 * @details
 * Test Objective:
 * - Verify that the atomic operation APIs function correctly.
 *
 * Test techniques:
 * - Dynamic analysis and testing
 * - Functional and black box testing
 * - Interface testing
 *
 * Prerequisite Conditions:
 * - N/A
 *
 * Input Specifications:
 * - N/A
 *
 * Test Procedure:
 * -# Call each of the following atomic operation APIs in turn and check
 * the function's return value and the resulting value of the target operand.
 * - atomic_cas()
 * - atomic_ptr_cas()
 * - atomic_add()
 * - atomic_sub()
 * - atomic_inc()
 * - atomic_dec()
 * - atomic_get()
 * - atomic_ptr_get()
 * - atomic_set()
 * - atomic_ptr_set()
 * - atomic_clear()
 * - atomic_ptr_clear()
 * - atomic_or()
 * - atomic_xor()
 * - atomic_and()
 * - atomic_nand()
 * - atomic_test_bit()
 * - atomic_test_and_clear_bit()
 * - atomic_test_and_set_bit()
 * - atomic_clear_bit()
 * - atomic_set_bit()
 * - atomic_set_bit_to()
 * - ATOMIC_DEFINE
 *
 * Expected Test Result:
 * - Function return values and target operands change as expected.
 *
 * Pass/Fail Criteria:
 * - Success if all check points in the test procedure pass, failure otherwise.
 *
 * Assumptions and Constraints:
 * - N/A
 *
 * @see atomic_cas(), atomic_add(), atomic_sub(),
 * atomic_inc(), atomic_dec(), atomic_get(), atomic_set(),
 * atomic_clear(), atomic_or(), atomic_and(), atomic_xor(),
 * atomic_nand(), atomic_test_bit(), atomic_test_and_clear_bit(),
 * atomic_test_and_set_bit(), atomic_clear_bit(), atomic_set_bit(),
 * ATOMIC_DEFINE
 *
 * @ingroup kernel_common_tests
 */
ZTEST_USER(atomic, test_atomic)
{
	int i;

	atomic_t target, orig;
	atomic_ptr_t ptr_target;
	atomic_val_t value;
	atomic_val_t oldvalue;
	void *ptr_value, *old_ptr_value;

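	/* ATOMIC_DEFINE() allocates an array of atomic_t holding at least
	 * NUM_FLAG_BITS bits, i.e. more bits than fit in a single atomic word.
	 */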
	ATOMIC_DEFINE(flag_bits, NUM_FLAG_BITS) = {0};

	zassert_equal(sizeof(atomic_t), ATOMIC_WORD(sizeof(uint64_t), sizeof(uint32_t)),
		      "sizeof(atomic_t)");

	target = 4;
	value = 5;
	oldvalue = 6;

	/* atomic_cas() */
	zassert_false(atomic_cas(&target, oldvalue, value), "atomic_cas");
	target = 6;
	zassert_true(atomic_cas(&target, oldvalue, value), "atomic_cas");
	zassert_true((target == value), "atomic_cas");

	/* atomic_ptr_cas() */
	ptr_target = ATOMIC_PTR_INIT((void *)4);
	ptr_value = (atomic_ptr_val_t)5;
	old_ptr_value = (atomic_ptr_val_t)6;
	zassert_false(atomic_ptr_cas(&ptr_target, old_ptr_value, ptr_value),
		      "atomic_ptr_cas");
	ptr_target = (atomic_ptr_val_t)6;
	zassert_true(atomic_ptr_cas(&ptr_target, old_ptr_value, ptr_value),
		     "atomic_ptr_cas");
	zassert_true((ptr_target == ptr_value), "atomic_ptr_cas");

	/* atomic_add() */
	target = 1;
	value = 2;
	zassert_true((atomic_add(&target, value) == 1), "atomic_add");
	zassert_true((target == 3), "atomic_add");
	/* Test that the atomic_add() value parameter can be negative */
	target = 2;
	value = -4;
	zassert_true((atomic_add(&target, value) == 2), "atomic_add");
	zassert_true((target == -2), "atomic_add");

	/* atomic_sub() */
	target = 10;
	value = 2;
	zassert_true((atomic_sub(&target, value) == 10), "atomic_sub");
	zassert_true((target == 8), "atomic_sub");
	/* Test that the atomic_sub() value parameter can be negative */
	target = 5;
	value = -4;
	zassert_true((atomic_sub(&target, value) == 5), "atomic_sub");
	zassert_true((target == 9), "atomic_sub");

	/* atomic_inc() */
	target = 5;
	zassert_true((atomic_inc(&target) == 5), "atomic_inc");
	zassert_true((target == 6), "atomic_inc");

	/* atomic_dec() */
	target = 2;
	zassert_true((atomic_dec(&target) == 2), "atomic_dec");
	zassert_true((target == 1), "atomic_dec");

	/* atomic_get() */
	target = 50;
	zassert_true((atomic_get(&target) == 50), "atomic_get");

	/* atomic_ptr_get() */
	ptr_target = ATOMIC_PTR_INIT((void *)50);
	zassert_true((atomic_ptr_get(&ptr_target) == (atomic_ptr_val_t)50),
		     "atomic_ptr_get");

	/* atomic_set() */
	target = 42;
	value = 77;
	zassert_true((atomic_set(&target, value) == 42), "atomic_set");
	zassert_true((target == value), "atomic_set");

	/* atomic_ptr_set() */
	ptr_target = ATOMIC_PTR_INIT((void *)42);
	ptr_value = (atomic_ptr_val_t)77;
	zassert_true((atomic_ptr_set(&ptr_target, ptr_value) == (atomic_ptr_val_t)42),
		     "atomic_ptr_set");
	zassert_true((ptr_target == ptr_value), "atomic_ptr_set");

	/* atomic_clear() */
	target = 100;
	zassert_true((atomic_clear(&target) == 100), "atomic_clear");
	zassert_true((target == 0), "atomic_clear");

	/* atomic_ptr_clear() */
	ptr_target = ATOMIC_PTR_INIT((void *)100);
	zassert_true((atomic_ptr_clear(&ptr_target) == (atomic_ptr_val_t)100),
		     "atomic_ptr_clear");
	zassert_true((ptr_target == NULL), "atomic_ptr_clear");

	/* atomic_or() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_or(&target, value) == 0xFF00), "atomic_or");
	zassert_true((target == 0xFF0F), "atomic_or");

	/* atomic_xor() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_xor(&target, value) == 0xFF00), "atomic_xor");
	zassert_true((target == 0xF00F), "atomic_xor");

	/* atomic_and() */
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_and(&target, value) == 0xFF00), "atomic_and");
	zassert_true((target == 0x0F00), "atomic_and");

	/* atomic_nand() */
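	/* atomic_nand() stores ~(target & value): ~(0xFF00 & 0x0F0F) == ~0x0F00,
	 * which is 0xFFFFF0FF for 32-bit atomics and 0xFFFFFFFFFFFFF0FF for 64-bit.
	 */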
	target = 0xFF00;
	value  = 0x0F0F;
	zassert_true((atomic_nand(&target, value) == 0xFF00), "atomic_nand");
	zassert_true((target == ATOMIC_WORD(0xFFFFFFFFFFFFF0FF, 0xFFFFF0FF)), "atomic_nand");

	/* atomic_test_bit() */
	for (i = 0; i < ATOMIC_BITS; i++) {
		target = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		zassert_true(!!atomic_test_bit(&target, i) == !!(target & BIT(i)),
			     "atomic_test_bit");
	}

	/* atomic_test_and_clear_bit() */
	for (i = 0; i < ATOMIC_BITS; i++) {
		orig = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		target = orig;
		zassert_true(!!(atomic_test_and_clear_bit(&target, i)) == !!(orig & BIT(i)),
			     "atomic_test_and_clear_bit");
		zassert_true(target == (orig & ~BIT(i)), "atomic_test_and_clear_bit");
	}

	/* atomic_test_and_set_bit() */
	for (i = 0; i < ATOMIC_BITS; i++) {
		orig = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		target = orig;
		zassert_true(!!(atomic_test_and_set_bit(&target, i)) == !!(orig & BIT(i)),
			     "atomic_test_and_set_bit");
		zassert_true(target == (orig | BIT(i)), "atomic_test_and_set_bit");
	}

	/* atomic_clear_bit() */
	for (i = 0; i < ATOMIC_BITS; i++) {
		orig = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		target = orig;
		atomic_clear_bit(&target, i);
		zassert_true(target == (orig & ~BIT(i)), "atomic_clear_bit");
	}

	/* atomic_set_bit() */
	for (i = 0; i < ATOMIC_BITS; i++) {
		orig = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		target = orig;
		atomic_set_bit(&target, i);
		zassert_true(target == (orig | BIT(i)), "atomic_set_bit");
	}

	/* atomic_set_bit_to(&target, i, false) */
	for (i = 0; i < ATOMIC_BITS; i++) {
		orig = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		target = orig;
		atomic_set_bit_to(&target, i, false);
		zassert_true(target == (orig & ~BIT(i)), "atomic_set_bit_to");
	}

	/* atomic_set_bit_to(&target, i, true) */
	for (i = 0; i < ATOMIC_BITS; i++) {
		orig = ATOMIC_WORD(0x0F0F0F0F0F0F0F0F, 0x0F0F0F0F);
		target = orig;
		atomic_set_bit_to(&target, i, true);
		zassert_true(target == (orig | BIT(i)), "atomic_set_bit_to");
	}

	/* ATOMIC_DEFINE */
	for (i = 0; i < NUM_FLAG_BITS; i++) {
		atomic_set_bit(flag_bits, i);
		zassert_true(atomic_test_bit(flag_bits, i),
			"Failed to set a single bit in an array of atomic variables");
		atomic_clear_bit(flag_bits, i);
		zassert_false(atomic_test_bit(flag_bits, i),
			"Failed to clear a single bit in an array of atomic variables");
	}
}

/* This helper thread runs for more than one time slice */
void atomic_handler(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	for (int i = 0; i < TEST_CYCLE; i++) {
		atomic_inc(&total_atomic);
		/* busy-wait for 1000us to lengthen the handler's execution time */
		k_busy_wait(1000);
	}
}

/**
 * @brief Verify atomic operations with threads
 *
 * @details Create two preemptible threads of equal priority that
 * atomically access the same atomic variable. Because the threads
 * have equal priority, enable time slicing so that both get
 * scheduled. Each thread runs for some time, during which the two
 * threads are scheduled alternately according to the time slice.
 *
 * @ingroup kernel_common_tests
 */
ZTEST(atomic, test_threads_access_atomic)
{
	k_tid_t tid[THREADS_NUM];

	/* enable time slice 1ms at priority 10 */
	k_sched_time_slice_set(1, K_PRIO_PREEMPT(10));

	for (int i = 0; i < THREADS_NUM; i++) {
		tid[i] = k_thread_create(&thread[i], stack[i], STACK_SIZE,
				atomic_handler, NULL, NULL, NULL,
				K_PRIO_PREEMPT(10), 0, K_NO_WAIT);
	}

	for (int i = 0; i < THREADS_NUM; i++) {
		k_thread_join(tid[i], K_FOREVER);
	}

	/* disable time slice */
	k_sched_time_slice_set(0, K_PRIO_PREEMPT(10));

	zassert_true(total_atomic == (TEST_CYCLE * THREADS_NUM),
		"atomic counting failure");
}

/**
 * @brief Check that the value of atomic_t is the same on overflow
 *	whether it is incremented atomically or non-atomically
 *
 * @details According to the C standard, signed integer overflow is
 *	undefined behavior. This test checks that the value of atomic_t
 *	is the same on overflow whether it is incremented in an atomic
 *	or a non-atomic manner. This allows us to increment an atomic
 *	variable in a non-atomic manner (as long as it is logically safe)
 *	and expect its value to match the result of the equivalent atomic
 *	increment.
 *
 * @ingroup kernel_common_tests
 */
ZTEST(atomic, test_atomic_overflow)
{
	/* Check overflow over max signed value */
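	/* overflowed_value is 2^(ATOMIC_BITS - 1), one past the maximum signed
	 * atomic_val_t; atomic_value therefore starts at that maximum.
	 */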
	uint64_t overflowed_value = (uint64_t)1 << (ATOMIC_BITS - 1);
	atomic_val_t atomic_value = overflowed_value - 1;
	atomic_t atomic_var = ATOMIC_INIT(atomic_value);

	atomic_value++;
	atomic_inc(&atomic_var);

	zassert_true(atomic_value == atomic_get(&atomic_var),
		"max signed overflow mismatch: %lx/%lx",
		atomic_value, atomic_get(&atomic_var));
	zassert_true(atomic_value == (atomic_val_t)overflowed_value,
		"unexpected value after overflow: %lx, expected: %lx",
		atomic_value, (atomic_val_t)overflowed_value);

	/* Check overflow over max unsigned value */
	atomic_value = -1;
	atomic_var = ATOMIC_INIT(atomic_value);

	atomic_value++;
	atomic_inc(&atomic_var);

	zassert_true(atomic_value == atomic_get(&atomic_var),
		"max unsigned overflow mismatch: %lx/%lx",
		atomic_value, atomic_get(&atomic_var));
	zassert_true(atomic_value == 0,
		"unexpected value after overflow: %lx, expected: 0",
		atomic_value);
}

extern void *common_setup(void);
ZTEST_SUITE(atomic, NULL, common_setup, NULL, NULL, NULL);
/**
 * @}
 */