/*
 * Copyright (c) 2018,2022 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/tc_util.h>
#include <zephyr/ztest.h>
#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>

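/* These tests exercise SMP behaviour, so they require at least two CPUs */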
BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS > 1);

#define CPU1_STACK_SIZE 1024

K_THREAD_STACK_DEFINE(cpu1_stack, CPU1_STACK_SIZE);
struct k_thread cpu1_thread;

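/* Lock bounced back and forth between the two CPUs by the bounce/trylock tests */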
static struct k_spinlock bounce_lock;

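/* Shared state: bounce_owner records the ID passed by the thread that last
 * took the lock, bounce_done tells the worker thread to stop, and the
 * trylock counters record failed (-EBUSY) and successful k_spin_trylock()
 * attempts.
 */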
volatile int bounce_owner, bounce_done;
volatile int trylock_failures;
volatile int trylock_successes;

/**
 * @brief Tests for spinlock
 *
 * @defgroup kernel_spinlock_tests Spinlock Tests
 *
 * @ingroup all_tests
 *
 * @{
 * @}
 */

/**
 * @brief Test basic spinlock
 *
 * @ingroup kernel_spinlock_tests
 *
 * @see k_spin_lock(), k_spin_unlock()
 */
ZTEST(spinlock, test_spinlock_basic)
{
	k_spinlock_key_t key;
	static struct k_spinlock l;

	zassert_true(!z_spin_is_locked(&l), "Spinlock initialized to locked");

	key = k_spin_lock(&l);

	zassert_true(z_spin_is_locked(&l), "Spinlock failed to lock");

	k_spin_unlock(&l, key);

	zassert_true(!z_spin_is_locked(&l), "Spinlock failed to unlock");
}

static void bounce_once(int id, bool trylock)
{
	int ret;
	int i, locked;
	k_spinlock_key_t key;

	/* Take the lock, check the last owner, and release it if it was us.
	 * Keep retrying until we acquire the lock "after" another CPU has
	 * held it.
	 */
	locked = 0;
	for (i = 0; i < 10000; i++) {
		if (trylock) {
			ret = k_spin_trylock(&bounce_lock, &key);
			if (ret == -EBUSY) {
				trylock_failures++;
				continue;
			}
			trylock_successes++;
		} else {
			key = k_spin_lock(&bounce_lock);
		}

		if (bounce_owner != id) {
			locked = 1;
			break;
		}

		k_spin_unlock(&bounce_lock, key);
		k_busy_wait(100);
	}

	if (!locked && bounce_done) {
		return;
	}

	zassert_true(locked, "Other CPU did not get the lock in 10000 tries");

	/* Mark us as the owner, spin for a while validating that we
	 * never see another owner write to the protected data.
	 */
	bounce_owner = id;

	for (i = 0; i < 5; i++) {
		zassert_true(bounce_owner == id, "Locked data changed");
		k_busy_wait(1);
	}

	/* Release the lock */
	k_spin_unlock(&bounce_lock, key);
}

static void cpu1_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

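	/* Keep bouncing the lock as ID 4321 until the main thread signals completion */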
	while (!bounce_done) {
		bounce_once(4321, false);
	}
}

/**
 * @brief Test spinlock with bounce
 *
 * @ingroup kernel_spinlock_tests
 *
 * @see arch_cpu_start()
 */
ZTEST(spinlock, test_spinlock_bounce)
{
	int i;

	k_thread_create(&cpu1_thread, cpu1_stack, CPU1_STACK_SIZE,
			cpu1_fn, NULL, NULL, NULL,
			0, 0, K_NO_WAIT);

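	/* Give the new thread a moment to start spinning on the other CPU */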
	k_busy_wait(10);

	for (i = 0; i < 10000; i++) {
		bounce_once(1234, false);
	}

	bounce_done = 1;

	k_thread_join(&cpu1_thread, K_FOREVER);
}

/**
 * @brief Test basic mutual exclusion using interrupt masking
 *
 * @details
 * - Spinlocks can be initialized at run-time.
 * - Spinlocks in uniprocessor context should achieve mutual exclusion using
 *   interrupt masking.
 *
 * @ingroup kernel_spinlock_tests
 *
 * @see k_spin_lock(), k_spin_unlock()
 */
ZTEST(spinlock, test_spinlock_mutual_exclusion)
{
	k_spinlock_key_t key;
	static struct k_spinlock lock_runtime;
	unsigned int irq_key;

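	/* Demonstrate run-time initialization: a zeroed spinlock starts out unlocked */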
	(void)memset(&lock_runtime, 0, sizeof(lock_runtime));

	key = k_spin_lock(&lock_runtime);

	zassert_true(z_spin_is_locked(&lock_runtime), "Spinlock failed to lock");

	/* The key returned by k_spin_lock() holds the pre-lock IRQ state,
	 * which should still report "unlocked".
	 */
	zassert_true(arch_irq_unlocked(key.key),
			"IRQs were already locked before taking the spinlock");

	/* Take a nested IRQ lock to verify that the spinlock has already
	 * disabled interrupts.
	 */
	irq_key = arch_irq_lock();

	/* The nested key should report that IRQs were already locked */
	zassert_false(arch_irq_unlocked(irq_key),
			"Spinlock did not disable interrupts");

	arch_irq_unlock(irq_key);

	k_spin_unlock(&lock_runtime, key);

	zassert_true(!z_spin_is_locked(&lock_runtime), "Spinlock failed to unlock");
}

static void trylock_fn(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

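	/* Same bouncing worker as cpu1_fn, but acquiring via k_spin_trylock() */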
	while (!bounce_done) {
		bounce_once(4321, true);
	}
}

/**
 * @brief Test k_spin_trylock()
 *
 * @ingroup kernel_spinlock_tests
 *
 * @see k_spin_trylock()
 */
ZTEST(spinlock, test_trylock)
{
	int i;

	k_thread_create(&cpu1_thread, cpu1_stack, CPU1_STACK_SIZE,
			trylock_fn, NULL, NULL, NULL,
			0, 0, K_NO_WAIT);

	k_busy_wait(10);

	for (i = 0; i < 10000; i++) {
		bounce_once(1234, true);
	}

	bounce_done = 1;

	k_thread_join(&cpu1_thread, K_FOREVER);

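	/* With two CPUs contending for the same lock we expect to have seen
	 * both failed and successful trylock attempts.
	 */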
	zassert_true(trylock_failures > 0);
	zassert_true(trylock_successes > 0);
}

static void before(void *ctx)
{
	ARG_UNUSED(ctx);

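	/* Reset the shared state so each test starts from a clean slate */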
	bounce_done = 0;
	bounce_owner = 0;
	trylock_failures = 0;
	trylock_successes = 0;
}

ZTEST_SUITE(spinlock, NULL, NULL, before, NULL, NULL);