/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Lexmark International, Inc.
 * Copyright (c) 2023 Arm Limited
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARM Cortex-M
 *
 * Core thread related primitives for the ARM Cortex-M
 * processor architecture.
 */

#include <zephyr/kernel.h>
#include <zephyr/llext/symbol.h>
#include <ksched.h>
#include <zephyr/sys/barrier.h>
#include <stdbool.h>
#include <cmsis_core.h>

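/* Extra MPU guard space needed when lazy stacking of FP registers can
 * extend past the default guard size (see the K_FP_REGS handling below).
 */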
#if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE)
#define FP_GUARD_EXTRA_SIZE	(MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \
				 MPU_GUARD_ALIGN_AND_SIZE)
#else
#define FP_GUARD_EXTRA_SIZE	0
#endif

#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped  */
#define EXC_RETURN_FTYPE           (0x00000010UL)
#endif

/* Default last octet of EXC_RETURN, for threads that have not run yet.
 * The full EXC_RETURN value will be e.g. 0xFFFFFFBC.
 */
#if defined(CONFIG_ARM_NONSECURE_FIRMWARE)
#define DEFAULT_EXC_RETURN 0xBC
#else
#define DEFAULT_EXC_RETURN 0xFD
#endif
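/* Both defaults select a return to Thread mode using the PSP, with the FP
 * context skipped (FTYPE = 1); the non-secure variant additionally targets
 * the Non-secure state.
 */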

#if !defined(CONFIG_MULTITHREADING)
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#endif
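/* Without multi-threading, z_main_stack is referenced directly below, both by
 * z_arm_switch_to_main_no_multithreading() and by the stack-fail check.
 */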

/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, and thus reusable by the stack when not needed anymore.
 *
 * The initial context is an exception stack frame (ESF) since exiting the
 * PendSV exception will want to pop an ESF. Interestingly, even though the
 * lsb of an instruction address to jump to must always be set since the CPU
 * always runs in thumb mode, the ESF expects the real address of the
 * instruction, with the lsb *not* set (instructions are always aligned on
 * 16 bit halfwords).  Since the compiler automatically sets the lsb of
 * function addresses, we have to unset it manually before storing it in the
 * 'pc' field of the ESF.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct __basic_sf *iframe;

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_USERSPACE)
	if (z_stack_is_user_capable(stack)) {
		/* For stacks that can host user threads, the guard area is
		 * carved out of the stack buffer instead of being reserved.
		 */
		thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
		thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
	}
#endif /* CONFIG_USERSPACE */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		/* A larger guard is needed because lazy stacking of FP regs
		 * may overshoot the guard area without writing anything. We
		 * carve it out of the stack buffer as-needed instead of
		 * unconditionally reserving it.
		 */
		thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
		thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
#endif /* CONFIG_MPU_STACK_GUARD */

	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
#if defined(CONFIG_USERSPACE)
	if ((thread->base.user_options & K_USER) != 0) {
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
	}
#else
	iframe->pc = (uint32_t)z_thread_entry;
#endif

	/* The ESF expects the real instruction address, so clear the
	 * thumb bit (LSB) that the compiler sets on function addresses.
	 */
	iframe->pc &= 0xfffffffe;
	iframe->a1 = (uint32_t)entry;
	iframe->a2 = (uint32_t)p1;
	iframe->a3 = (uint32_t)p2;
	iframe->a4 = (uint32_t)p3;

	iframe->xpsr =
		0x01000000UL; /* clear all, thumb bit is 1, even if RO */

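	/* Point the thread's saved PSP at this frame: when the thread is
	 * first switched in, the exception return pops r0-r3, r12, lr, pc
	 * and xPSR from it, so execution starts at z_thread_entry() (or
	 * arch_user_mode_enter() for user threads) with the entry point
	 * and parameters already in the argument registers.
	 */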
	thread->callee_saved.psp = (uint32_t)iframe;
	thread->arch.basepri = 0;

#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
	thread->arch.mode = 0;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
#endif
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
	 * irrelevant.
	 */
}

#if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \
	&& defined(CONFIG_FPU_SHARING)

static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
	bool use_large_guard)
{
	if (use_large_guard) {
		/* Switch to use a large MPU guard if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) {
			/* Default guard size is used. Update required. */
			thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start +=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start +=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size -=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	} else {
		/* Switch to use the default MPU guard size if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
			/* Large guard size is used. Update required. */
			thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start -=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start -=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size +=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	}
}

/*
 * Adjust the MPU stack guard size together with the FPU
 * policy and the stack_info values for the thread that is
 * being switched in.
 */
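/* Returns the MPU guard size (in bytes) that the caller should program for
 * this thread's guard region, presumably when reconfiguring the dynamic MPU
 * regions during context switch.
 */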
uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
{
	if (((thread->base.user_options & K_FP_REGS) != 0) ||
		((thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0)) {
		/* The thread has been pre-tagged (at creation or later) with
		 * K_FP_REGS, i.e. it is expected to be using the FPU registers
		 * (if not already). Activate lazy stacking and program a large
		 * MPU guard to safely detect privileged thread stack overflows.
		 *
		 * OR
		 * The thread is not pre-tagged with K_FP_REGS, but it has
		 * generated an FP context. Activate lazy stacking and
		 * program a large MPU guard to detect privileged thread
		 * stack overflows.
		 */
		FPU->FPCCR |= FPU_FPCCR_LSPEN_Msk;

		z_arm_thread_stack_info_adjust(thread, true);

		/* Tag the thread with K_FP_REGS */
		thread->base.user_options |= K_FP_REGS;

		return MPU_GUARD_ALIGN_AND_SIZE_FLOAT;
	}

	/* Thread is not pre-tagged with K_FP_REGS, and it has
	 * not been using the FPU. Since there is no active FPU
	 * context, de-activate lazy stacking and program the
	 * default MPU guard size.
	 */
	FPU->FPCCR &= (~FPU_FPCCR_LSPEN_Msk);

	z_arm_thread_stack_info_adjust(thread, false);

	return MPU_GUARD_ALIGN_AND_SIZE;
}
#endif

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{

	/* Set up privileged stack before entering user mode */
	arch_current_thread()->arch.priv_stack_start =
		(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode, which means the guard area is no
	 * longer used here; it is instead moved to the privileged stack
	 * to catch stack overflows there. Undo the calculations done at
	 * thread creation which accounted for memory borrowed from the
	 * thread stack.
	 */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
		arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
		arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
	arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	arch_current_thread()->arch.priv_stack_start +=
		((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

	z_arm_userspace_enter(user_entry, p1, p2, p3,
			     (uint32_t)arch_current_thread()->stack_info.start,
			     arch_current_thread()->stack_info.size -
			     arch_current_thread()->stack_info.delta);
	CODE_UNREACHABLE;
}


bool z_arm_thread_is_in_user_mode(void)
{
	uint32_t value;

	/* CONTROL.nPRIV is set when the CPU executes in unprivileged
	 * (user) mode.
	 */
	value = __get_CONTROL();
	return (value & CONTROL_nPRIV_Msk) != 0;
}
EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
 * @brief Configure ARM built-in stack guard
 *
 * This function configures per thread stack guards by reprogramming
 * the built-in Process Stack Pointer Limit Register (PSPLIM).
 * The functionality is meant to be used during context switch.
 *
 * @param thread thread info data structure.
 */
void configure_builtin_stack_guard(struct k_thread *thread)
{
#if defined(CONFIG_USERSPACE)
	if ((thread->arch.mode & CONTROL_nPRIV_Msk) != 0) {
		/* Only configure the stack limit for threads in privileged
		 * mode (i.e. supervisor threads or user threads doing a
		 * system call). User threads executing in user mode do not
		 * require stack limit protection.
		 */
		__set_PSPLIM(0);
		return;
	}
	/* Only configure PSPLIM to guard the privileged stack area, if
	 * the thread is currently using it, otherwise guard the default
	 * thread stack. Note that the conditional check relies on the
	 * thread privileged stack being allocated in higher memory area
	 * than the default thread stack (ensured by design).
	 */
	uint32_t guard_start =
		((thread->arch.priv_stack_start) &&
			(__get_PSP() >= thread->arch.priv_stack_start)) ?
		(uint32_t)thread->arch.priv_stack_start :
		(uint32_t)thread->stack_obj;

	__ASSERT(thread->stack_info.start == ((uint32_t)thread->stack_obj),
		"stack_info.start does not point to the start of the "
		"thread allocated area.");
#else
	uint32_t guard_start = thread->stack_info.start;
#endif
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(guard_start);
#else
#error "Built-in PSP limit checks not supported by HW"
#endif
}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)

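/* A guard violation is flagged when a valid fault address falls inside the
 * guard region and the stack pointer has descended into it; if no fault
 * address is available (pure stacking error), the stack pointer alone is
 * checked against the guard.
 */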
#define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr) \
	((fault_addr != -EINVAL) ? \
	((fault_addr >= guard_start) && \
	(fault_addr < (guard_start + guard_len)) && \
	(stack_ptr < (guard_start + guard_len))) \
	: \
	(stack_ptr < (guard_start + guard_len)))

/**
 * @brief Assess occurrence of current thread's stack corruption
 *
 * This function assesses whether a memory fault (on a given memory
 * address) is the result of stack memory corruption of the current
 * thread.
 *
 * Thread stack corruption for supervisor threads or user threads in
 * privileged mode (when User Space is supported) is reported upon an
 * attempt to access the stack guard area (if the MPU Stack Guard feature
 * is supported). Additionally, the current PSP (process stack pointer)
 * must be pointing inside or below the guard area.
 *
 * Thread stack corruption for user threads in user mode is reported,
 * if the current PSP is pointing below the start of the current
 * thread's stack.
 *
 * Notes:
 * - we assume a fully descending stack,
 * - we assume a stacking error has occurred,
 * - the function shall be called when handling MemManage and Bus fault,
 *   and only if a Stacking error has been reported.
 *
 * If stack corruption is detected, the function returns the lowest
 * allowed address where the Stack Pointer can safely point to, to
 * prevent errors when un-stacking the corrupted stack frame upon
 * exception return.
 *
 * @param fault_addr memory address on which memory access violation
 *                   has been reported. It can be invalid (-EINVAL),
 *                   if only a Stacking error has been reported.
 * @param psp        current address the PSP points to
 *
 * @return The lowest allowed stack frame pointer, if error is a
 *         thread stack corruption, otherwise return 0.
 */
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
	const struct k_thread *thread = arch_current_thread();

	if (thread == NULL) {
		return 0;
	}
#endif

#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
	defined(CONFIG_MPU_STACK_GUARD)
	uint32_t guard_len =
		((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	/* If MPU_STACK_GUARD is not enabled, the guard length is
	 * effectively zero. Stack overflows may be detected only
	 * for user threads in nPRIV mode.
	 */
	uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if defined(CONFIG_USERSPACE)
	if (thread->arch.priv_stack_start) {
		/* User thread */
		if (z_arm_thread_is_in_user_mode() == false) {
			/* User thread in privileged mode */
			if (IS_MPU_GUARD_VIOLATION(
				thread->arch.priv_stack_start - guard_len,
					guard_len,
				fault_addr, psp)) {
				/* Thread's privileged stack corruption */
				return thread->arch.priv_stack_start;
			}
		} else {
			if (psp < (uint32_t)thread->stack_obj) {
				/* Thread's user stack corruption */
				return (uint32_t)thread->stack_obj;
			}
		}
	} else {
		/* Supervisor thread */
		if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start -
				guard_len,
				guard_len,
				fault_addr, psp)) {
			/* Supervisor thread stack corruption */
			return thread->stack_info.start;
		}
	}
#else /* CONFIG_USERSPACE */
#if defined(CONFIG_MULTITHREADING)
	if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return thread->stack_info.start;
	}
#else
	if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return (uint32_t)K_THREAD_STACK_BUFFER(z_main_stack);
	}
#endif
#endif /* CONFIG_USERSPACE */

	return 0;
}
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	if (thread != arch_current_thread()) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Disable all floating point capabilities for the thread */

	/* The K_FP_REGS flag is used in SWAP and stack check fail. Locking
	 * interrupts here prevents a possible context-switch or MPU
	 * fault from taking an outdated thread user_options flag into
	 * account.
	 */
	int key = arch_irq_lock();

	thread->base.user_options &= ~K_FP_REGS;

	__set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk));

	/* No need to add an ISB barrier after setting the CONTROL
	 * register; arch_irq_unlock() already adds one.
	 */

	arch_irq_unlock(key);

	return 0;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* This is not supported in Cortex-M */
	return -ENOTSUP;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/* Internal function for Cortex-M initialization,
 * applicable to either case of running Zephyr
 * with or without multi-threading support.
 */
static void z_arm_prepare_switch_to_main(void)
{
#if defined(CONFIG_FPU)
	/* Initialize the Floating Point Status and Control Register when in
	 * Unshared FP Registers mode (In Shared FP Registers mode, FPSCR is
	 * initialized at thread creation for threads that make use of the FP).
	 */
#if defined(CONFIG_ARMV8_1_M_MAINLINE)
	/*
	 * For ARMv8.1-M with FPU, the FPSCR[18:16] LTPSIZE field must be set
	 * to 0b100 for "Tail predication not applied", as that is its reset
	 * value.
	 */
	__set_FPSCR(4 << FPU_FPDSCR_LTPSIZE_Pos);
#else
	__set_FPSCR(0);
#endif
#if defined(CONFIG_FPU_SHARING)
	/* In Sharing mode clearing FPSCR may set the CONTROL.FPCA flag. */
	__set_CONTROL(__get_CONTROL() & (~(CONTROL_FPCA_Msk)));
	barrier_isync_fence_full();
#endif /* CONFIG_FPU_SHARING */
#endif /* CONFIG_FPU */
}

void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main)
{
	z_arm_prepare_switch_to_main();

	arch_current_thread_set(main_thread);

#if defined(CONFIG_THREAD_LOCAL_STORAGE)
	/* On Cortex-M, TLS uses a global variable as pointer to
	 * the thread local storage area. So this needs to point
	 * to the main thread's TLS area before switching to any
	 * thread for the first time, as the pointer is only set
	 * during context switching.
	 */
	extern uintptr_t z_arm_tls_ptr;

	z_arm_tls_ptr = main_thread->tls;
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	z_thread_mark_switched_in();
#endif

	/* the ready queue cache already contains the main thread */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	/*
	 * If stack protection is enabled, make sure to set it
	 * before jumping to thread entry function
	 */
	z_arm_configure_dynamic_mpu_regions(main_thread);
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* Set PSPLIM register for built-in stack guarding of main thread. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(main_thread->stack_info.start);
#else
#error "Built-in PSP limit checks not supported by the hardware."
#endif
#endif /* CONFIG_BUILTIN_STACK_GUARD */

	/*
	 * Set PSP to the highest address of the main stack
	 * before enabling interrupts and jumping to main.
	 *
	 * The compiler may store _main on the stack, but this
	 * location is relative to `PSP`.
	 * This assembly block ensures that _main is stored in
	 * a callee saved register before switching stack and continuing
	 * with the thread entry process.
	 *
	 * When calling arch_irq_unlock_outlined, LR is lost which is fine since
	 * we do not intend to return after calling z_thread_entry.
	 */
	__asm__ volatile (
	"mov   r4,  %0\n"	/* force _main to be stored in a register */
	"msr   PSP, %1\n"	/* __set_PSP(stack_ptr) */

	"movs  r0,  #0\n"	/* arch_irq_unlock(0) */
	"ldr   r3, =arch_irq_unlock_outlined\n"
	"blx   r3\n"

	"mov   r0, r4\n"	/* z_thread_entry(_main, NULL, NULL, NULL) */
	"movs  r1, #0\n"
	"movs  r2, #0\n"
	"movs  r3, #0\n"
	"ldr   r4, =z_thread_entry\n"
	"bx    r4\n"		/* We don't intend to return, so there is no need to link. */
	:
	: "r" (_main), "r" (stack_ptr)
	: "r0", "r1", "r2", "r3", "r4", "ip", "lr", "memory");

	CODE_UNREACHABLE;
}

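/* Out-of-line versions of the IRQ lock/unlock helpers: the assembly blocks
 * above and below reach them via blx, so they must exist as real symbols;
 * __used prevents the compiler from optimizing them away.
 */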
__used void arch_irq_unlock_outlined(unsigned int key)
{
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	__enable_fault_irq(); /* alters FAULTMASK */
	__enable_irq(); /* alters PRIMASK */
#endif
	arch_irq_unlock(key);
}

__used unsigned int arch_irq_lock_outlined(void)
{
	return arch_irq_lock();
}

#if !defined(CONFIG_MULTITHREADING)

FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(
	k_thread_entry_t main_entry, void *p1, void *p2, void *p3)
{
	z_arm_prepare_switch_to_main();

	/* Set PSP to the highest address of the main stack. */
	char *psp = K_THREAD_STACK_BUFFER(z_main_stack) +
		K_THREAD_STACK_SIZEOF(z_main_stack);

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	char *psplim = (K_THREAD_STACK_BUFFER(z_main_stack));
	/* Clear PSPLIM before setting it to guard the main stack area. */
	__set_PSPLIM(0);
#endif

	/* Store all required input in registers, to be accessible
	 * after stack pointer change. The function is not going
	 * to return, so callee-saved registers do not need to be
	 * stacked.
	 *
	 * The compiler may store main_entry on the stack, but this
	 * location is relative to `PSP`.
	 * This assembly block ensures that main_entry is stored in
	 * a register before switching stack and continuing
	 * with the thread entry process.
	 */

	__asm__ volatile (
#ifdef CONFIG_BUILTIN_STACK_GUARD
	"msr  PSPLIM, %[_psplim]\n" /* __set_PSPLIM(_psplim) */
#endif
	"msr  PSP, %[_psp]\n"       /* __set_PSP(psp) */
	"mov r0, #0\n"
	"ldr r1, =arch_irq_unlock_outlined\n"
	"blx r1\n"

	"mov r0, %[_p1]\n"
	"mov r1, %[_p2]\n"
	"mov r2, %[_p3]\n"
	"blx  %[_main_entry]\n"     /* main_entry(p1, p2, p3) */

	"ldr r0, =arch_irq_lock_outlined\n"
	"blx r0\n"
	"loop: b loop\n\t"    /* while (true); */
	:
	: [_p1]"r" (p1), [_p2]"r" (p2), [_p3]"r" (p3),
	  [_psp]"r" (psp), [_main_entry]"r" (main_entry)
#ifdef CONFIG_BUILTIN_STACK_GUARD
	, [_psplim]"r" (psplim)
#endif
	: "r0", "r1", "r2", "ip", "lr"
	);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */