/*
 * Copyright (c) 2019 Nordic Semiconductor ASA.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/ztest.h>
#include <zephyr/arch/cpu.h>
#include <cmsis_core.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys/barrier.h>
#include <offsets_short_arch.h>
#include <ksched.h>

#if !defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) && \
	!defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
#error "Unsupported architecture"
#endif

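/* Both test threads run at the same cooperative priority; the two BASEPRI
 * values are arbitrary, distinct interrupt-lock levels exchanged across the
 * swap, and SWAP_RETVAL is the fake return value the alternative thread
 * injects into the main thread's swap context.
 */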
#define PRIORITY 0
#define BASEPRI_MODIFIED_1 0x20
#define BASEPRI_MODIFIED_2 0x40
#define SWAP_RETVAL        0x1234

#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_FTYPE           (0x00000010UL)
#endif

#if defined(CONFIG_ARMV8_1_M_MAINLINE)
/*
 * For ARMv8.1-M, the FPSCR[18:16] LTPSIZE field may always read 0b010 if MVE
 * is not implemented, so mask it when validating the value of the FPSCR.
 */
#define FPSCR_MASK		(~FPU_FPDSCR_LTPSIZE_Msk)
#else
#define FPSCR_MASK		(0xffffffffU)
#endif

extern void z_move_thread_to_end_of_prio_q(struct k_thread *thread);

static struct k_thread alt_thread;
static K_THREAD_STACK_DEFINE(alt_thread_stack, 1024);

/* Status variable to indicate that a context-switch has occurred. */
bool volatile switch_flag;

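/* Pointer to the main (ztest) test thread, exported so that the alternative
 * thread can inspect its k_thread struct (callee-saved container, arch
 * state, swap return value) while the main thread is swapped out.
 */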
struct k_thread *p_ztest_thread;

_callee_saved_t ztest_thread_callee_saved_regs_container;
int ztest_swap_return_val;

/* Arbitrary values for the callee-saved registers,
 * enforced at the beginning of the test.
 */
const _callee_saved_t ztest_thread_callee_saved_regs_init = {
	.v1 = 0x12345678, .v2 = 0x23456789, .v3 = 0x3456789a, .v4 = 0x456789ab,
	.v5 = 0x56789abc, .v6 = 0x6789abcd, .v7 = 0x789abcde, .v8 = 0x89abcdef
};

static void load_callee_saved_regs(const _callee_saved_t *regs)
{
	/* Load the callee-saved registers with the given values */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
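	/* ARMv6-M/ARMv8-M Baseline LDMIA can only target the low registers
	 * (r0-r7), so the high callee-saved registers are loaded indirectly:
	 * v5-v8 (at struct offset 16) are first loaded into r4-r7 and moved
	 * to r8-r11, then the pointer is rewound and v1-v4 are loaded into
	 * r4-r7. r7 (the frame pointer) is stashed in r1 around the sequence.
	 */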
	__asm__ volatile (
		"mov r1, r7;\n\t"
		"mov r0, %0;\n\t"
		"add r0, #16;\n\t"
		"ldmia r0!, {r4-r7};\n\t"
		"mov r8, r4;\n\t"
		"mov r9, r5;\n\t"
		"mov r10, r6;\n\t"
		"mov r11, r7;\n\t"
		"sub r0, #32;\n\t"
		"ldmia r0!, {r4-r7};\n\t"
		"mov r7, r1;\n\t"
		: /* no output */
		: "r" (regs)
		: "memory", "r1", "r0"
	);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile (
		"mov r1, r7;\n\t"
		"ldmia %0, {v1-v8};\n\t"
		"mov r7, r1;\n\t"
		: /* no output */
		: "r" (regs)
		: "memory", "r1"
	);
#endif
	barrier_dsync_fence_full();
}

static void verify_callee_saved(const _callee_saved_t *src,
		const _callee_saved_t *dst)
{
	/* Verify callee-saved registers are as expected */
	zassert_true((src->v1 == dst->v1)
			&& (src->v2 == dst->v2)
			&& (src->v3 == dst->v3)
			&& (src->v4 == dst->v4)
			&& (src->v5 == dst->v5)
			&& (src->v6 == dst->v6)
			&& (src->v7 == dst->v7)
			&& (src->v8 == dst->v8),
		" got: 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x\n"
		" expected:  0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x\n",
		src->v1,
		src->v2,
		src->v3,
		src->v4,
		src->v5,
		src->v6,
		src->v7,
		src->v8,
		dst->v1,
		dst->v2,
		dst->v3,
		dst->v4,
		dst->v5,
		dst->v6,
		dst->v7,
		dst->v8
	);
}

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)

/* Create the alternative thread with K_FP_REGS options */
#define ALT_THREAD_OPTIONS K_FP_REGS

/* Arbitrary values for the floating-point callee-saved registers */
struct _preempt_float ztest_thread_fp_callee_saved_regs = {
	.s16 = 0x11111111, .s17 = 0x22222222,
	.s18 = 0x33333333, .s19 = 0x44444444,
	.s20 = 0x55555555, .s21 = 0x66666666,
	.s22 = 0x77777777, .s23 = 0x88888888,
	.s24 = 0x99999999, .s25 = 0xaaaaaaaa,
	.s26 = 0xbbbbbbbb, .s27 = 0xcccccccc,
	.s28 = 0xdddddddd, .s29 = 0xeeeeeeee,
	.s30 = 0xffffffff, .s31 = 0x00000000,
};

static void load_fp_callee_saved_regs(
	const volatile struct _preempt_float *regs)
{
	__asm__ volatile (
		"vldmia %0, {s16-s31};\n\t"
		:
		: "r" (regs)
		: "memory"
		);
	barrier_dsync_fence_full();
}

static void verify_fp_callee_saved(const struct _preempt_float *src,
		const struct _preempt_float *dst)
{
	/* Verify FP callee-saved registers are as expected */
	zassert_true((src->s16 == dst->s16)
			&& (src->s17 == dst->s17)
			&& (src->s18 == dst->s18)
			&& (src->s19 == dst->s19)
			&& (src->s20 == dst->s20)
			&& (src->s21 == dst->s21)
			&& (src->s22 == dst->s22)
			&& (src->s23 == dst->s23)
			&& (src->s24 == dst->s24)
			&& (src->s25 == dst->s25)
			&& (src->s26 == dst->s26)
			&& (src->s27 == dst->s27)
			&& (src->s28 == dst->s28)
			&& (src->s29 == dst->s29)
			&& (src->s30 == dst->s30)
			&& (src->s31 == dst->s31),
		" got: 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x"
		" 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x\n"
		" expected:  0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x"
		" 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x 0x%0x\n",
		(double)src->s16,
		(double)src->s17,
		(double)src->s18,
		(double)src->s19,
		(double)src->s20,
		(double)src->s21,
		(double)src->s22,
		(double)src->s23,
		(double)src->s24,
		(double)src->s25,
		(double)src->s26,
		(double)src->s27,
		(double)src->s28,
		(double)src->s29,
		(double)src->s30,
		(double)src->s31,
		(double)dst->s16,
		(double)dst->s17,
		(double)dst->s18,
		(double)dst->s19,
		(double)dst->s20,
		(double)dst->s21,
		(double)dst->s22,
		(double)dst->s23,
		(double)dst->s24,
		(double)dst->s25,
		(double)dst->s26,
		(double)dst->s27,
		(double)dst->s28,
		(double)dst->s29,
		(double)dst->s30,
		(double)dst->s31
	);
}

#else
/* No options passed */
#define ALT_THREAD_OPTIONS 0
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

static void alt_thread_entry(void *p1, void *p2, void *p3)
{
	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	int init_flag, post_flag;

	/* Lock interrupts to make sure we get preempted only when
	 * it is required by the test */
	(void)irq_lock();

	init_flag = switch_flag;
	zassert_true(init_flag == false,
		"Alternative thread: switch flag not false on thread entry\n");

	/* Set switch flag */
	switch_flag = true;

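	/* Under CONFIG_NO_OPTIMIZATIONS the main test thread pends PendSV
	 * directly and unlocks interrupts with key 0 instead of calling
	 * arch_swap(BASEPRI_MODIFIED_1), so its saved basepri is expected to
	 * be 0 and no swap return value has been recorded.
	 */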
#if defined(CONFIG_NO_OPTIMIZATIONS)
	zassert_true(p_ztest_thread->arch.basepri == 0,
		"ztest thread basepri not preserved in swap-out\n");
#else
	/* Verify that the main test thread has the correct value
	 * for the state variable thread.arch.basepri (set before swap).
	 */
	zassert_true(p_ztest_thread->arch.basepri == BASEPRI_MODIFIED_1,
		"ztest thread basepri not preserved in swap-out\n");

	/* Verify the original swap return value (set by arch_swap()). */
	zassert_true(p_ztest_thread->arch.swap_return_value == -EAGAIN,
		"ztest thread swap-return-value not preserved in swap-out\n");
#endif

	/* Verify that the main test thread (ztest) has stored the callee-saved
	 * registers properly in its corresponding callee-saved container.
	 */
	verify_callee_saved(
		(const _callee_saved_t *)&p_ztest_thread->callee_saved,
		&ztest_thread_callee_saved_regs_container);

	/* Zero the container of the callee-saved registers, to validate,
	 * later, that it is populated properly.
	 */
	memset(&ztest_thread_callee_saved_regs_container,
		0, sizeof(_callee_saved_t));

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)

	/* Verify that the _current_ (alt) thread is initialized with FPCA cleared. */
	zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0,
		"CONTROL.FPCA is not cleared at initialization: 0x%x\n",
		__get_CONTROL());

	/* Verify that the _current_ (alt) thread is
	 * initialized with EXC_RETURN.Ftype set
	 */
	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
		"Alt thread Ftype flag not set at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
	/* Alt thread is created with K_FP_REGS set, so we
	 * expect lazy stacking and the long guard to be enabled.
	 */
	zassert_true((arch_current_thread()->arch.mode &
		Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
		"Alt thread MPU GUARD FLOAT flag not set at initialization\n");
	zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
		"Alt thread K_FP_REGS not set at initialization\n");
	zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
		"Lazy FP Stacking not set at initialization\n");
#endif

	/* Verify that the _current_ (alt) thread is initialized with FPSCR cleared. */
	zassert_true((__get_FPSCR() & FPSCR_MASK) == 0,
		"(Alt thread) FPSCR is not cleared at initialization: 0x%x\n", __get_FPSCR());

	zassert_true((p_ztest_thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
		"ztest thread mode Ftype flag not updated at swap-out: 0x%0x\n",
		p_ztest_thread->arch.mode_exc_return);

	/* Verify that the main test thread (ztest) has stored the FP
	 * callee-saved registers properly in its corresponding FP
	 * callee-saved container.
	 */
	verify_fp_callee_saved((const struct _preempt_float *)
		&p_ztest_thread->arch.preempt_float,
		&ztest_thread_fp_callee_saved_regs);

	/* Zero the container of the FP callee-saved registers, to validate,
	 * later, that it is populated properly.
	 */
	memset(&ztest_thread_fp_callee_saved_regs,
		0, sizeof(ztest_thread_fp_callee_saved_regs));

#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

	/* Modify the arch.basepri flag of the main test thread, to verify,
	 * later, that this is passed properly to the BASEPRI.
	 */
	p_ztest_thread->arch.basepri = BASEPRI_MODIFIED_2;

#if !defined(CONFIG_NO_OPTIMIZATIONS)
	/* Modify the arch.swap_return_value flag of the main test thread,
	 * to verify later, that this value is properly returned by swap.
	 */
	p_ztest_thread->arch.swap_return_value = SWAP_RETVAL;
#endif

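	/* Requeue the current (alt) thread behind the main test thread at the
	 * same cooperative priority, so that the context switch triggered
	 * below resumes the main test thread.
	 */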
	z_move_thread_to_end_of_prio_q(arch_current_thread());

	/* Modify the callee-saved registers by zeroing them.
	 * The main test thread will, later, assert that they
	 * are restored to their original values upon context
	 * switch.
	 *
	 * Note: preserve the r7 register (frame pointer).
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	__asm__ volatile (
		/* Stash r4-r11 on the stack; they will be restored much later
		 * in another inline asm block -- that should be reworked,
		 * since the stack must be balanced when we leave any inline
		 * asm. We could simply use an alternative stack for storing
		 * them instead of the function's stack.
		 */
		"push {r4-r7};\n\t"
		"mov r2, r8;\n\t"
		"mov r3, r9;\n\t"
		"push {r2, r3};\n\t"
		"mov r2, r10;\n\t"
		"mov r3, r11;\n\t"
		"push {r2, r3};\n\t"

		/* Save r0 and r7, since we want to preserve them but they
		 * are used below: r0 serves as a working copy of the struct
		 * pointer, which we do not want to modify, and r7 is the
		 * frame pointer, which we must not clobber.
		 */
		"push {r0, r7};\n\t"

		/* Load the struct into r4-r11 */
		"mov r0, %0;\n\t"
		"add r0, #16;\n\t"
		"ldmia r0!, {r4-r7};\n\t"
		"mov r8, r4;\n\t"
		"mov r9, r5;\n\t"
		"mov r10, r6;\n\t"
		"mov r11, r7;\n\t"
		"sub r0, #32;\n\t"
		"ldmia r0!, {r4-r7};\n\t"

		/* Restore r0 and r7 */
		"pop {r0, r7};\n\t"

		: /* no output */
		: "r" (&ztest_thread_callee_saved_regs_container)
		: "memory", "r0", "r2", "r3"
	);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile (
		"push {v1-v8};\n\t"
		"push {r0, r1};\n\t"
		"mov r0, r7;\n\t"
		"ldmia %0, {v1-v8};\n\t"
		"mov r7, r0;\n\t"
		"pop {r0, r1};\n\t"
		: /* no output */
		: "r" (&ztest_thread_callee_saved_regs_container)
		: "memory", "r0"
	);
#endif

	/* Manually trigger a context-switch, to swap-out
	 * the alternative test thread.
	 */
	barrier_dmem_fence_full();
	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
	irq_unlock(0);
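	/* PendSV is taken as soon as interrupts are unlocked; execution
	 * resumes here only after the scheduler switches the alternative
	 * thread back in, i.e. after the main test thread has run again.
	 */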

	/* Restore stacked callee-saved registers */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Pop in the reverse order of the earlier pushes: r10/r11 were
	 * pushed last (so they come off the stack first), followed by
	 * r8/r9 and then r4-r7.
	 */
	__asm__ volatile (
		"pop {r4, r5, r6, r7};\n\t"
		"mov r10, r4;\n\t"
		"mov r11, r5;\n\t"
		"mov r8, r6;\n\t"
		"mov r9, r7;\n\t"
		"pop {r4, r5, r6, r7};\n\t"
		: : :
	);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile (
		"pop {v1-v8};\n\t"
		: : :
	);
#endif

	/* Verify that the main test thread has managed to resume, before
	 * we return to the alternative thread (we verify this by checking
	 * the status of the switch flag; the main test thread will clear
	 * it when it is swapped-back in).
	 */
	post_flag = switch_flag;
	zassert_true(post_flag == false,
		"Alternative thread: switch flag not false on thread exit\n");
}

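/* Out-of-line (noinline) wrapper, so that arch_swap() is reached through a
 * real function call; this keeps the compiler from inlining the swap into
 * the test body and reshuffling the callee-saved registers the test has
 * just seeded.
 */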
#if !defined(CONFIG_NO_OPTIMIZATIONS)
static int __noinline arch_swap_wrapper(void)
{
	return arch_swap(BASEPRI_MODIFIED_1);
}
#endif

ZTEST(arm_thread_swap, test_arm_thread_swap)
{
	int test_flag;

	/* Main test thread (ztest)
	 *
	 * Simulating initial conditions:
	 * - set arbitrary values in the callee-saved registers
	 * - set arbitrary values in the FP callee-saved registers,
	 *   if building with CONFIG_FPU/CONFIG_FPU_SHARING
	 * - zero the thread's callee-saved data structure
	 * - set the thread's priority equal to that of the alternative test thread
	 */

	/* Load the callee-saved registers with initial arbitrary values
	 * (from ztest_thread_callee_saved_regs_init)
	 */
	load_callee_saved_regs(&ztest_thread_callee_saved_regs_init);

	k_thread_priority_set(arch_current_thread(), K_PRIO_COOP(PRIORITY));

	/* Export a pointer to the current thread into a global variable,
	 * so that its callee-saved registers container and arch.basepri
	 * state are easily accessible from the (alternative) test thread.
	 */
	p_ztest_thread = arch_current_thread();

	/* Confirm initial conditions before starting the test. */
	test_flag = switch_flag;
	zassert_true(test_flag == false,
		"Switch flag not initialized properly\n");
	zassert_true(arch_current_thread()->arch.basepri == 0,
		"Thread BASEPRI flag not clear at thread start\n");
	/* Verify, also, that the interrupts are unlocked. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
	zassert_true(__get_BASEPRI() == 0,
		"initial BASEPRI not zero\n");
#else
	/* For the Cortex-M Baseline architecture, we verify that
	 * the interrupt lock is disabled.
	 */
	zassert_true(__get_PRIMASK() == 0,
		"initial PRIMASK not zero\n");
#endif /* CONFIG_CPU_CORTEX_M_HAS_BASEPRI */

#if defined(CONFIG_USERSPACE)
	/* The main test thread is set to run in privileged mode */
	zassert_false((arch_is_user_context()),
		"Main test thread does not start in privileged mode\n");

	/* Assert that the mode status variable indicates privileged mode */
	zassert_true((arch_current_thread()->arch.mode & CONTROL_nPRIV_Msk) == 0,
		"Thread nPRIV flag not clear for supervisor thread: 0x%0x\n",
		arch_current_thread()->arch.mode);
#endif /* CONFIG_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* The main test thread is not (yet) actively using the FP registers */
	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
		"Thread Ftype flag not set at initialization: 0x%0x\n",
		arch_current_thread()->arch.mode_exc_return);

	/* Verify that the main test thread is initialized with FPCA cleared. */
	zassert_true((__get_CONTROL() & CONTROL_FPCA_Msk) == 0,
		"CONTROL.FPCA is not cleared at initialization: 0x%x\n",
		__get_CONTROL());
	/* Verify that the main test thread is initialized with FPSCR cleared. */
	zassert_true((__get_FPSCR() & FPSCR_MASK) == 0,
		"FPSCR is not cleared at initialization: 0x%x\n", __get_FPSCR());

	/* Clear the thread's floating-point callee-saved registers' container.
	 * The container will, later, be populated by the swap mechanism.
	 */
	memset(&arch_current_thread()->arch.preempt_float, 0,
		sizeof(struct _preempt_float));

	/* Seed the FP callee-saved registers with arbitrary values at test initialization */
	load_fp_callee_saved_regs(&ztest_thread_fp_callee_saved_regs);

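	/* FPSCR[0] is the IOC (Invalid Operation cumulative) status flag; it
	 * is part of the thread's FP context, so it is expected to survive
	 * the round trip through the alternative thread.
	 */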
	/* Modify bit-0 of the FPSCR - will be checked again upon swap-in. */
	zassert_true((__get_FPSCR() & 0x1) == 0,
		"FPSCR bit-0 has been set before testing it\n");
	__set_FPSCR(__get_FPSCR() | 0x1);

	/* The main test thread is using the FP registers, but the .mode
	 * flag is not updated until the next context switch.
	 */
	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) != 0,
		"Thread Ftype flag not set at initialization\n");
#if defined(CONFIG_MPU_STACK_GUARD)
	zassert_true((arch_current_thread()->arch.mode &
		Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0,
		"Thread MPU GUARD FLOAT flag not clear at initialization\n");
	zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) == 0,
		"Thread K_FP_REGS not clear at initialization\n");
	zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) == 0,
		"Lazy FP Stacking not clear at initialization\n");
#endif
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

	/* Create an alternative (supervisor) testing thread */
	k_thread_create(&alt_thread,
		alt_thread_stack,
		K_THREAD_STACK_SIZEOF(alt_thread_stack),
		alt_thread_entry,
		NULL, NULL, NULL,
		K_PRIO_COOP(PRIORITY), ALT_THREAD_OPTIONS,
		K_NO_WAIT);
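	/* Both threads share the same cooperative priority, so creating the
	 * alternative thread does not preempt the current (cooperative)
	 * thread; the switch only happens once this thread requeues itself
	 * and triggers a context switch below.
	 */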

	/* Verify context-switch has not occurred. */
	test_flag = switch_flag;
	zassert_true(test_flag == false,
		"Switch flag set when it should not have been\n");

	/* Prepare to force a context switch to the alternative thread,
	 * by manually adding the current thread to the end of the queue,
	 * so it will be context switched-out.
	 *
	 * Lock interrupts to make sure we get preempted only when it is
	 * explicitly required by the test.
	 */
	(void)irq_lock();
	z_move_thread_to_end_of_prio_q(arch_current_thread());

	/* Clear the thread's callee-saved registers' container.
	 * The container will, later, be populated by the swap
	 * mechanism.
	 */
	memset(&arch_current_thread()->callee_saved, 0, sizeof(_callee_saved_t));

	/* Verify context-switch has not occurred yet. */
	test_flag = switch_flag;
	zassert_true(test_flag == false,
		"Switch flag set by an unexpected context-switch\n");

	/* Store the callee-saved registers to some global memory
	 * accessible to the alternative testing thread. That
	 * thread is going to verify that the callee-saved regs
	 * are successfully loaded into the thread's callee-saved
	 * registers' container.
	 */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	__asm__ volatile (
		"push {r0, r1, r2, r3};\n\t"
		"mov r1, %0;\n\t"
		"stmia r1!, {r4-r7};\n\t"
		"mov r2, r8;\n\t"
		"mov r3, r9;\n\t"
		"stmia r1!, {r2-r3};\n\t"
		"mov r2, r10;\n\t"
		"mov r3, r11;\n\t"
		"stmia r1!, {r2-r3};\n\t"
		"pop {r0, r1, r2, r3};\n\t"
		:	: "r" (&ztest_thread_callee_saved_regs_container)
		: "memory"
	);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile (
		"stmia %0, {v1-v8};\n\t"
		:
		: "r" (&ztest_thread_callee_saved_regs_container)
		: "memory"
	);
#endif

	/* Manually trigger a context-switch to swap-out the current thread.
	 * Request a return to a different interrupt lock state.
	 */
	barrier_dmem_fence_full();

#if defined(CONFIG_NO_OPTIMIZATIONS)
	SCB->ICSR |= SCB_ICSR_PENDSVSET_Msk;
	irq_unlock(0);
	/* The thread is now swapped-back in. */

#else /* CONFIG_NO_OPTIMIZATIONS */

	/* Fake a different irq_unlock key when performing the swap.
	 * This will be verified by the alternative test thread.
	 *
	 * Force an indirect call to arch_swap() to prevent the compiler from
	 * changing the saved callee-saved registers, as it would if
	 * arch_swap() were inlined.
	 */
	register int swap_return_val __asm__("r0") = arch_swap_wrapper();
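	/* swap_return_val is bound to r0 (where arch_swap() returns its
	 * value), so that the inline asm below can store it to memory before
	 * the compiler has a chance to move or spill it.
	 */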

#endif /* CONFIG_NO_OPTIMIZATIONS */

	/* Dump callee-saved registers to memory. */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	__asm__ volatile (
		"push {r0, r1, r2, r3};\n\t"
		"mov r1, %0;\n\t"
		"stmia r1!, {r4-r7};\n\t"
		"mov r2, r8;\n\t"
		"mov r3, r9;\n\t"
		"stmia r1!, {r2-r3};\n\t"
		"mov r2, r10;\n\t"
		"mov r3, r11;\n\t"
		"stmia r1!, {r2-r3};\n\t"
		"pop {r0, r1, r2, r3};\n\t"
		:	: "r" (&ztest_thread_callee_saved_regs_container)
		: "memory"
	);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile (
		"stmia %0, {v1-v8};\n\t"
		:
		: "r" (&ztest_thread_callee_saved_regs_container)
		: "memory"
	);
#endif

#if !defined(CONFIG_NO_OPTIMIZATIONS)
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	/* Note: ARMv6-M will always write back the base register,
	 * so we make sure we preserve the state of the register used
	 * as the base register in the Store Multiple instruction.
	 * We also enforce write-back to suppress an assembler warning.
	 */
	__asm__ volatile (
		"push {r0, r1, r2, r3, r4, r5, r6, r7};\n\t"
		"stm %0!, {%1};\n\t"
		"pop {r0, r1, r2, r3, r4, r5, r6, r7};\n\t"
		:
		: "r" (&ztest_swap_return_val), "r" (swap_return_val)
		: "memory"
	);
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__asm__ volatile (
		"stm %0, {%1};\n\t"
		:
		: "r" (&ztest_swap_return_val), "r" (swap_return_val)
		: "memory"
	);
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
#endif

	/* After swap-back, verify that the callee-saved registers loaded
	 * look exactly like what is located in the respective callee-saved
	 * container of the thread.
	 */
	verify_callee_saved(
		&ztest_thread_callee_saved_regs_container,
		&arch_current_thread()->callee_saved);

	/* Verify context-switch did occur. */
	test_flag = switch_flag;
	zassert_true(test_flag == true,
		"Switch flag not set as expected: %u\n",
		switch_flag);
	/* Clear the switch flag to signal that the main test thread
	 * has been successfully swapped-in, as expected by the test.
	 */
	switch_flag = false;

	/* Verify that the arch.basepri state variable is cleared: the value
	 * the alternative thread stored in it was loaded into BASEPRI when
	 * this thread was switched back in.
	 */
	zassert_true(arch_current_thread()->arch.basepri == 0,
		"arch.basepri value not in accordance with the update\n");

#if defined(CONFIG_CPU_CORTEX_M_HAS_BASEPRI)
	/* Verify that the BASEPRI register was updated during the last
	 * swap-in of the thread.
	 */
	zassert_true(__get_BASEPRI() == BASEPRI_MODIFIED_2,
		"BASEPRI not in accordance with the update: 0x%0x\n",
		__get_BASEPRI());
#else
	/* For the Cortex-M Baseline architecture, we verify that
	 * the interrupt lock is enabled.
	 */
	zassert_true(__get_PRIMASK() != 0,
		"PRIMASK not in accordance with the update: 0x%0x\n",
		__get_PRIMASK());
#endif /* CONFIG_CPU_CORTEX_M_HAS_BASEPRI */

#if !defined(CONFIG_NO_OPTIMIZATIONS)
	/* The thread is now swapped-back in. */
	zassert_equal(arch_current_thread()->arch.swap_return_value, SWAP_RETVAL,
		"Swap value not set as expected: 0x%x (0x%x)\n",
		arch_current_thread()->arch.swap_return_value, SWAP_RETVAL);
	zassert_equal(arch_current_thread()->arch.swap_return_value, ztest_swap_return_val,
		"Swap value not returned as expected: 0x%x (0x%x)\n",
		arch_current_thread()->arch.swap_return_value, ztest_swap_return_val);
#endif

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	/* Dump the FP callee-saved registers to memory. */
	__asm__ volatile (
		"vstmia %0, {s16-s31};\n\t"
		:
		: "r" (&ztest_thread_fp_callee_saved_regs)
		: "memory"
	);

	/* After swap-back, verify that the FP callee-saved registers loaded
	 * look exactly like what is located in the respective FP callee-saved
	 * container of the thread.
	 */
	verify_fp_callee_saved(
		&ztest_thread_fp_callee_saved_regs,
		&arch_current_thread()->arch.preempt_float);

	/* Verify that the main test thread's FPSCR bit-0 was restored. */
	zassert_true((__get_FPSCR() & 0x1) == 0x1,
		"FPSCR bit-0 not restored at swap: 0x%x\n", __get_FPSCR());

	/* The main test thread is using the FP registers, and the .mode
	 * flag and the MPU GUARD flag are now updated.
	 */
	zassert_true((arch_current_thread()->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0,
		"Thread Ftype flag not cleared after main thread was swapped back in\n");
#if defined(CONFIG_MPU_STACK_GUARD)
	zassert_true((arch_current_thread()->arch.mode &
		Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0,
		"Thread MPU GUARD FLOAT flag not set\n");
	zassert_true((arch_current_thread()->base.user_options & K_FP_REGS) != 0,
		"Thread K_FP_REGS not set after main thread was swapped back in\n");
	zassert_true((FPU->FPCCR & FPU_FPCCR_LSPEN_Msk) != 0,
		"Lazy FP Stacking not set after main thread was swapped back in\n");
#endif
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

}
/**
 * @}
 */