/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Lexmark International, Inc.
 * Copyright (c) 2023 Arm Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARM Cortex-M
 *
 * Core thread related primitives for the ARM Cortex-M
 * processor architecture.
 */

#include <zephyr/kernel.h>
#include <zephyr/llext/symbol.h>
#include <ksched.h>
#include <zephyr/sys/barrier.h>
#include <stdbool.h>
#include <cmsis_core.h>

#if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE)
#define FP_GUARD_EXTRA_SIZE (MPU_GUARD_ALIGN_AND_SIZE_FLOAT - MPU_GUARD_ALIGN_AND_SIZE)
#else
#define FP_GUARD_EXTRA_SIZE 0
#endif
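
/* For example, with a (hypothetical) 32-byte default guard and a 128-byte
 * FP-capable guard, FP_GUARD_EXTRA_SIZE would be 96 bytes; the actual values
 * depend on the MPU and FPU configuration.
 */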

#ifndef EXC_RETURN_FTYPE
/* EXC_RETURN bit [4], frame type: 0 = FP context stacked, 1 = FP context skipped */
#define EXC_RETURN_FTYPE (0x00000010UL)
#endif
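
/* Example: a thread whose last exception entry stacked an FP context has
 * (mode_exc_return & EXC_RETURN_FTYPE) == 0; z_arm_mpu_stack_guard_and_fpu_adjust()
 * below relies on this to detect an active FP context.
 */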

/* Default last octet of EXC_RETURN, for threads that have not run yet.
 * The full EXC_RETURN value will be e.g. 0xFFFFFFBC.
 */
#if defined(CONFIG_ARM_NONSECURE_FIRMWARE)
#define DEFAULT_EXC_RETURN 0xBC
#else
#define DEFAULT_EXC_RETURN 0xFD
#endif

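/* Declared here so that the no-multithreading switch-to-main path and the
 * stack-overflow check below can reference the main stack object directly.
 */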
#if !defined(CONFIG_MULTITHREADING)
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#endif

/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, so its memory can be reused by the stack once it is no
 * longer needed.
 *
 * The initial context is an exception stack frame (ESF) since exiting the
 * PendSV exception will want to pop an ESF. Interestingly, even though the
 * lsb of an instruction address to jump to must always be set since the CPU
 * always runs in thumb mode, the ESF expects the real address of the
 * instruction, with the lsb *not* set (instructions are always aligned on
 * 16 bit halfwords). Since the compiler automatically sets the lsb of
 * function addresses, we have to unset it manually before storing it in the
 * 'pc' field of the ESF.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack, char *stack_ptr,
		     k_thread_entry_t entry, void *p1, void *p2, void *p3)
{
	struct __basic_sf *iframe;

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_USERSPACE)
	if (z_stack_is_user_capable(stack)) {
		/* For stacks that can host user threads, the guard area is
		 * carved out of the stack buffer instead of being reserved.
		 */
		thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
		thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
	}
#endif /* CONFIG_USERSPACE */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		/* A larger guard is needed because lazy stacking of FP regs
		 * may overshoot the guard area without writing anything. We
		 * carve it out of the stack buffer as needed instead of
		 * unconditionally reserving it.
		 */
		thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
		thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
#endif /* CONFIG_MPU_STACK_GUARD */

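	/* Carve the initial exception stack frame out of the highest addresses
	 * of the stack buffer; the first exception return into this thread,
	 * performed by z_arm_pendsv(), will pop this frame.
	 */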
	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
#if defined(CONFIG_USERSPACE)
	if ((thread->base.user_options & K_USER) != 0) {
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
	}
#else
	iframe->pc = (uint32_t)z_thread_entry;
#endif

	/* The ESF expects the real instruction address, so clear the Thumb
	 * bit (LSB); see the comment above this function.
	 */
	iframe->pc &= 0xfffffffe;
	iframe->a1 = (uint32_t)entry;
	iframe->a2 = (uint32_t)p1;
	iframe->a3 = (uint32_t)p2;
	iframe->a4 = (uint32_t)p3;

	iframe->xpsr = 0x01000000UL; /* all bits clear except the Thumb (T) bit */

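	/* The thread's PSP starts at the frame itself; z_arm_pendsv() loads
	 * PSP from callee_saved.psp when this thread is switched in.
	 */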
	thread->callee_saved.psp = (uint32_t)iframe;
	thread->arch.basepri = 0;

#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
	thread->arch.mode = 0;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
#endif
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
	 * irrelevant.
	 */
}

#if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)

static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread, bool use_large_guard)
{
	if (use_large_guard) {
		/* Switch to use a large MPU guard if not already. */
		if ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) {
			/* Default guard size is used. Update required. */
			thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start += FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
			}
		}
	} else {
		/* Switch to use the default MPU guard size if not already. */
		if ((thread->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
			/* Large guard size is used. Update required. */
			thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start -= FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start -= FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size += FP_GUARD_EXTRA_SIZE;
			}
		}
	}
}

/*
 * Adjust the MPU stack guard size together with the FPU
 * policy and the stack_info values for the thread that is
 * being switched in.
 */
uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
{
	if (((thread->base.user_options & K_FP_REGS) != 0) ||
	    ((thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0)) {
		/* The thread has been pre-tagged (at creation or later) with
		 * K_FP_REGS, i.e. it is expected to be using the FPU registers
		 * (if not already). Activate lazy stacking and program a large
		 * MPU guard to safely detect privileged thread stack overflows.
		 *
		 * OR
		 * The thread is not pre-tagged with K_FP_REGS, but it has
		 * generated an FP context. Activate lazy stacking and
		 * program a large MPU guard to detect privileged thread
		 * stack overflows.
		 */
		FPU->FPCCR |= FPU_FPCCR_LSPEN_Msk;

		z_arm_thread_stack_info_adjust(thread, true);

		/* Tag the thread with K_FP_REGS */
		thread->base.user_options |= K_FP_REGS;

		return MPU_GUARD_ALIGN_AND_SIZE_FLOAT;
	}

	/* Thread is not pre-tagged with K_FP_REGS, and it has
	 * not been using the FPU. Since there is no active FPU
	 * context, de-activate lazy stacking and program the
	 * default MPU guard size.
	 */
	FPU->FPCCR &= (~FPU_FPCCR_LSPEN_Msk);

	z_arm_thread_stack_info_adjust(thread, false);

	return MPU_GUARD_ALIGN_AND_SIZE;
}
#endif

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry, void *p1, void *p2, void *p3)
{

	/* Set up privileged stack before entering user mode */
	_current->arch.priv_stack_start = (uint32_t)z_priv_stack_find(_current->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode, which means the guard area is no
	 * longer used here; it is instead moved to the privileged stack
	 * to catch stack overflows there. Undo the calculations that
	 * accounted for memory borrowed from the thread stack.
	 */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
		_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
		_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

	/* The stack guard area is reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	_current->arch.priv_stack_start +=
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
			? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
			: MPU_GUARD_ALIGN_AND_SIZE;
#else
	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
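
	/* Resulting layout of the privileged stack object (descending stack):
	 *
	 *   high addr  +------------------------+
	 *              | privileged stack       |  <- PSP moves within this area
	 *              +------------------------+  <- priv_stack_start
	 *              | MPU guard              |
	 *   low  addr  +------------------------+  <- z_priv_stack_find(stack_obj)
	 */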
#endif /* CONFIG_MPU_STACK_GUARD */

	z_arm_userspace_enter(user_entry, p1, p2, p3, (uint32_t)_current->stack_info.start,
			      _current->stack_info.size - _current->stack_info.delta);
	CODE_UNREACHABLE;
}

bool z_arm_thread_is_in_user_mode(void)
{
	uint32_t value;

	/* return mode information */
	value = __get_CONTROL();
	return (value & CONTROL_nPRIV_Msk) != 0;
}
EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
 * @brief Configure ARM built-in stack guard
 *
 * This function configures per-thread stack guards by reprogramming
 * the built-in Process Stack Pointer Limit Register (PSPLIM).
 * The functionality is meant to be used during context switch.
 *
 * @param thread thread info data structure.
 */
void configure_builtin_stack_guard(struct k_thread *thread)
{
#if defined(CONFIG_USERSPACE)
	if ((thread->arch.mode & CONTROL_nPRIV_Msk) != 0) {
		/* Only configure the stack limit for threads in privileged
		 * mode (i.e. supervisor threads or user threads doing a
		 * system call). User threads executing in user mode do not
		 * require stack limit protection.
		 */
		__set_PSPLIM(0);
		return;
	}
	/* Only configure PSPLIM to guard the privileged stack area if
	 * the thread is currently using it; otherwise guard the default
	 * thread stack. Note that the conditional check relies on the
	 * thread's privileged stack being allocated in a higher memory
	 * area than the default thread stack (ensured by design).
	 */
	uint32_t guard_start =
		((thread->arch.priv_stack_start) && (__get_PSP() >= thread->arch.priv_stack_start))
			? (uint32_t)thread->arch.priv_stack_start
			: (uint32_t)thread->stack_obj;

	__ASSERT(thread->stack_info.start == ((uint32_t)thread->stack_obj),
		 "stack_info.start does not point to the start of the thread allocated area.");
#else
	uint32_t guard_start = thread->stack_info.start;
#endif
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(guard_start);
#else
#error "Built-in PSP limit checks not supported by HW"
#endif
}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)

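/* IS_MPU_GUARD_VIOLATION() distinguishes two cases: when a precise fault
 * address is available, the fault must fall inside the guard and the stack
 * pointer must already be at or below the end of the guard; when no fault
 * address was reported (-EINVAL), only the stack pointer's position relative
 * to the guard is examined.
 */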
#define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr)                      \
	((fault_addr != -EINVAL)                                                                   \
		 ? ((fault_addr >= guard_start) && (fault_addr < (guard_start + guard_len)) &&     \
		    (stack_ptr < (guard_start + guard_len)))                                       \
		 : (stack_ptr < (guard_start + guard_len)))

/**
 * @brief Assess occurrence of current thread's stack corruption
 *
 * This function assesses whether a memory fault (on a given memory
 * address) is the result of stack memory corruption of the current
 * thread.
 *
 * Thread stack corruption for supervisor threads or user threads in
 * privileged mode (when User Space is supported) is reported upon an
 * attempt to access the stack guard area (if the MPU Stack Guard feature
 * is supported). Additionally, the current PSP (process stack pointer)
 * must be pointing inside or below the guard area.
 *
 * Thread stack corruption for user threads in user mode is reported
 * if the current PSP is pointing below the start of the current
 * thread's stack.
 *
 * Notes:
 * - we assume a fully descending stack,
 * - we assume a stacking error has occurred,
 * - the function shall be called when handling MemManage and Bus faults,
 *   and only if a stacking error has been reported.
 *
 * If stack corruption is detected, the function returns the lowest
 * allowed address where the Stack Pointer can safely point to, to
 * prevent errors when un-stacking the corrupted stack frame upon
 * exception return.
 *
 * @param fault_addr memory address on which a memory access violation
 *                   has been reported. It can be invalid (-EINVAL)
 *                   if only a stacking error has been reported.
 * @param psp        current address the PSP points to
 *
 * @return The lowest allowed stack frame pointer, if the error is a
 *         thread stack corruption; otherwise 0.
 */
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
	const struct k_thread *thread = _current;

	if (thread == NULL) {
		return 0;
	}
#endif

#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && defined(CONFIG_MPU_STACK_GUARD)
	uint32_t guard_len = ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0)
				     ? MPU_GUARD_ALIGN_AND_SIZE_FLOAT
				     : MPU_GUARD_ALIGN_AND_SIZE;
#else
	/* If MPU_STACK_GUARD is not enabled, the guard length is
	 * effectively zero. Stack overflows may be detected only
	 * for user threads in nPRIV mode.
	 */
	uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if defined(CONFIG_USERSPACE)
	if (thread->arch.priv_stack_start) {
		/* User thread */
		if (z_arm_thread_is_in_user_mode() == false) {
			/* User thread in privileged mode */
			if (IS_MPU_GUARD_VIOLATION(thread->arch.priv_stack_start - guard_len,
						   guard_len, fault_addr, psp)) {
				/* Thread's privileged stack corruption */
				return thread->arch.priv_stack_start;
			}
		} else {
			if (psp < (uint32_t)thread->stack_obj) {
				/* Thread's user stack corruption */
				return (uint32_t)thread->stack_obj;
			}
		}
	} else {
		/* Supervisor thread */
		if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len, guard_len,
					   fault_addr, psp)) {
			/* Supervisor thread stack corruption */
			return thread->stack_info.start;
		}
	}
#else /* CONFIG_USERSPACE */
#if defined(CONFIG_MULTITHREADING)
	if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len, guard_len, fault_addr,
				   psp)) {
		/* Thread stack corruption */
		return thread->stack_info.start;
	}
#else
	if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack, guard_len, fault_addr, psp)) {
		/* Thread stack corruption */
		return (uint32_t)K_THREAD_STACK_BUFFER(z_main_stack);
	}
#endif
#endif /* CONFIG_USERSPACE */

	return 0;
}
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	if (thread != _current) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Disable all floating point capabilities for the thread */

	/* The K_FP_REGS flag is used in SWAP and the stack check fail path.
	 * Locking interrupts here prevents a possible context switch or MPU
	 * fault from taking an outdated thread user_options flag into
	 * account.
	 */
	int key = arch_irq_lock();

	thread->base.user_options &= ~K_FP_REGS;

	__set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk));

	/* No need to add an ISB barrier after setting the CONTROL
	 * register; arch_irq_unlock() already adds one.
	 */

	arch_irq_unlock(key);

	return 0;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* This is not supported in Cortex-M */
	return -ENOTSUP;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/* Internal function for Cortex-M initialization,
 * applicable whether Zephyr runs with or without
 * multithreading support.
 */
static void z_arm_prepare_switch_to_main(void)
{
#if defined(CONFIG_FPU)
	/* Initialize the Floating Point Status and Control Register when in
	 * Unshared FP Registers mode (in Shared FP Registers mode, FPSCR is
	 * initialized at thread creation for threads that make use of the FP).
	 */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
	/*
	 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
	 * to 0b100 for "tail predication not applied", which is its reset
	 * value.
	 */
	__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
	__set_FPSCR(0);
#endif
#if defined(CONFIG_FPU_SHARING)
	/* In Sharing mode clearing FPSCR may set the CONTROL.FPCA flag. */
	__set_CONTROL(__get_CONTROL() & (~(CONTROL_FPCA_Msk)));
	barrier_isync_fence_full();
#endif /* CONFIG_FPU_SHARING */
#endif /* CONFIG_FPU */
}

void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main)
{
	z_arm_prepare_switch_to_main();

	z_current_thread_set(main_thread);

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* On Cortex-M, TLS uses a global variable as pointer to
	 * the thread local storage area. So this needs to point
	 * to the main thread's TLS area before switching to any
	 * thread for the first time, as the pointer is only set
	 * during context switching.
	 */
	extern uintptr_t z_arm_tls_ptr;

	z_arm_tls_ptr = main_thread->tls;
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	z_thread_mark_switched_in();
#endif

	/* the ready queue cache already contains the main thread */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	/*
	 * If stack protection is enabled, make sure to set it
	 * before jumping to the thread entry function.
	 */
	z_arm_configure_dynamic_mpu_regions(main_thread);
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* Set PSPLIM register for built-in stack guarding of the main thread. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(main_thread->stack_info.start);
#else
#error "Built-in PSP limit checks not supported by the hardware."
#endif
#endif /* CONFIG_BUILTIN_STACK_GUARD */

	/*
	 * Set PSP to the highest address of the main stack
	 * before enabling interrupts and jumping to main.
	 *
	 * The compiler may store _main on the stack, but this
	 * location is relative to `PSP`.
	 * This assembly block ensures that _main is stored in
	 * a callee-saved register before switching stacks and continuing
	 * with the thread entry process.
	 *
	 * When calling arch_irq_unlock_outlined, LR is lost, which is fine
	 * since we do not intend to return after calling z_thread_entry.
	 */
	__asm__ volatile("mov   r4,  %0\n" /* force _main to be stored in a register */
			 "msr   PSP, %1\n" /* __set_PSP(stack_ptr) */

			 "movs  r0,  #0\n" /* arch_irq_unlock(0) */
			 "ldr   r3, =arch_irq_unlock_outlined\n"
			 "blx   r3\n"

			 "mov   r0, r4\n" /* z_thread_entry(_main, NULL, NULL, NULL) */
			 "movs  r1, #0\n"
			 "movs  r2, #0\n"
			 "movs  r3, #0\n"
			 "ldr   r4, =z_thread_entry\n"
			 /* We don't intend to return, so there is no need to link. */
			 "bx    r4\n"
			 :
			 : "r"(_main), "r"(stack_ptr)
			 : "r0", "r1", "r2", "r3", "r4", "ip", "lr", "memory");

	CODE_UNREACHABLE;
}

__used void arch_irq_unlock_outlined(unsigned int key)
{
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__enable_fault_irq(); /* alters FAULTMASK */
	__enable_irq();       /* alters PRIMASK */
#endif
	arch_irq_unlock(key);
}

__used unsigned int arch_irq_lock_outlined(void)
{
	return arch_irq_lock();
}

#if !defined(CONFIG_MULTITHREADING)

FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(k_thread_entry_t main_entry, void *p1,
							  void *p2, void *p3)
{
	z_arm_prepare_switch_to_main();

	/* Set PSP to the highest address of the main stack. */
	char *psp = K_THREAD_STACK_BUFFER(z_main_stack) + K_THREAD_STACK_SIZEOF(z_main_stack);

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	char *psplim = (K_THREAD_STACK_BUFFER(z_main_stack));
	/* Clear PSPLIM before setting it to guard the main stack area. */
	__set_PSPLIM(0);
#endif

	/* Store all required input in registers, to be accessible
	 * after the stack pointer change. The function is not going
	 * to return, so callee-saved registers do not need to be
	 * stacked.
	 *
	 * The compiler may store main_entry on the stack, but this
	 * location is relative to `PSP`.
	 * This assembly block ensures that main_entry is stored in
	 * a callee-saved register before switching stacks and continuing
	 * with the thread entry process.
	 */

	__asm__ volatile(
#ifdef CONFIG_BUILTIN_STACK_GUARD
		"msr  PSPLIM, %[_psplim]\n" /* __set_PSPLIM(_psplim) */
#endif
		"msr  PSP, %[_psp]\n" /* __set_PSP(psp) */
		"movs r0, #0\n"
		"ldr r1, =arch_irq_unlock_outlined\n"
		"blx r1\n"

		"mov r0, %[_p1]\n"
		"mov r1, %[_p2]\n"
		"mov r2, %[_p3]\n"
		"blx  %[_main_entry]\n" /* main_entry(p1, p2, p3) */

		"ldr r0, =arch_irq_lock_outlined\n"
		"blx r0\n"
		"loop: b loop\n\t" /* while (true); */
		:
		: [_p1] "r"(p1), [_p2] "r"(p2), [_p3] "r"(p3), [_psp] "r"(psp),
		  [_main_entry] "r"(main_entry)
#ifdef CONFIG_BUILTIN_STACK_GUARD
			  ,
		  [_psplim] "r"(psplim)
#endif
		: "r0", "r1", "r2", "ip", "lr");

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */