/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARM Cortex-M and Cortex-R
 *
 * Core thread related primitives for the ARM Cortex-M and Cortex-R
 * processor architecture.
 */

#include <kernel.h>
#include <ksched.h>
#include <wait_q.h>

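/* Extra MPU guard space needed when the larger, FP-aware guard is in effect:
 * lazy stacking of the FP context may overshoot the default guard without
 * writing anything, so the guard (and the space carved from the stack buffer)
 * must be larger for threads using the FPU.
 */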
#if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE)
#define FP_GUARD_EXTRA_SIZE	(MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \
				 MPU_GUARD_ALIGN_AND_SIZE)
#else
#define FP_GUARD_EXTRA_SIZE	0
#endif

#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped */
#define EXC_RETURN_FTYPE	(0x00000010UL)
#endif

/* Default last octet of EXC_RETURN, for threads that have not run yet.
 * The full EXC_RETURN value will be e.g. 0xFFFFFFBC.
 */
#if defined(CONFIG_ARM_NONSECURE_FIRMWARE)
#define DEFAULT_EXC_RETURN 0xBC
#else
#define DEFAULT_EXC_RETURN 0xFD
#endif
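/* Both values select a return to Thread mode using the PSP with a standard
 * (non-FP) stack frame (FType set). The Non-Secure firmware variant (0xBC)
 * additionally clears bits [6] (S) and [0] (ES) to indicate the Non-Secure
 * stack and state.
 */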

#if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_CPU_CORTEX_M)
extern K_THREAD_STACK_DEFINE(z_main_stack, CONFIG_MAIN_STACK_SIZE);
#endif

/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, and thus reusable by the stack when not needed anymore.
 *
 * The initial context is an exception stack frame (ESF) since exiting the
 * PendSV exception will want to pop an ESF. Interestingly, even though the
 * lsb of an instruction address to jump to must always be set since the CPU
 * always runs in thumb mode, the ESF expects the real address of the
 * instruction, with the lsb *not* set (instructions are always aligned on
 * 16 bit halfwords). Since the compiler automatically sets the lsb of
 * function addresses, we have to unset it manually before storing it in the
 * 'pc' field of the ESF.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct __basic_sf *iframe;

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_USERSPACE)
	if (z_stack_is_user_capable(stack)) {
		/* Guard area is carved out of the buffer, instead of
		 * reserved, for stacks that can host user threads.
		 */
		thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
		thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
	}
#endif /* CONFIG_USERSPACE */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		/* A larger guard is needed because lazy stacking of FP
		 * registers may overshoot the guard area without writing
		 * anything. We carve it out of the stack buffer as needed
		 * instead of unconditionally reserving it.
		 */
		thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
		thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
#endif /* CONFIG_MPU_STACK_GUARD */

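	/* Carve the initial exception stack frame (ESF) out of the top of
	 * the stack: Z_STACK_PTR_TO_FRAME returns a pointer one
	 * struct __basic_sf below stack_ptr, and z_arm_pendsv() will
	 * "restore" this frame when the thread is first switched in.
	 */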
	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
#if defined(CONFIG_USERSPACE)
	if ((thread->base.user_options & K_USER) != 0) {
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
	}
#else
	iframe->pc = (uint32_t)z_thread_entry;
#endif

#if defined(CONFIG_CPU_CORTEX_M)
	/* The ESF expects the instruction address with the thumb bit (lsb)
	 * cleared; see the comment above arch_new_thread().
	 */
	iframe->pc &= 0xfffffffe;
#endif
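	/* a1-a4 occupy the r0-r3 slots of the ESF; when the frame is popped
	 * on the first exception return, z_thread_entry() therefore receives
	 * (entry, p1, p2, p3) as its arguments per the AAPCS.
	 */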
	iframe->a1 = (uint32_t)entry;
	iframe->a2 = (uint32_t)p1;
	iframe->a3 = (uint32_t)p2;
	iframe->a4 = (uint32_t)p3;

#if defined(CONFIG_CPU_CORTEX_M)
	/* Only the Thumb (T) bit is set; all other xPSR bits are cleared. */
	iframe->xpsr = 0x01000000UL;
#else
	iframe->xpsr = A_BIT | MODE_SYS;
#if defined(CONFIG_COMPILER_ISA_THUMB2)
	iframe->xpsr |= T_BIT;
#endif /* CONFIG_COMPILER_ISA_THUMB2 */
#endif /* CONFIG_CPU_CORTEX_M */

	thread->callee_saved.psp = (uint32_t)iframe;
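	/* BASEPRI = 0: no interrupt priorities are masked when the thread
	 * first runs.
	 */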
	thread->arch.basepri = 0;
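	/* arch.mode (set up below) caches per-thread mode flags: the
	 * privilege state (CONTROL.nPRIV) and, when the larger FP-aware MPU
	 * guard is in use, Z_ARM_MODE_MPU_GUARD_FLOAT_Msk. mode_exc_return
	 * holds the low byte of EXC_RETURN to be used when the thread is
	 * switched in.
	 */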

#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
	thread->arch.mode = 0;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
#endif
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
	 * irrelevant.
	 */
}

#if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \
	&& defined(CONFIG_FPU_SHARING)

static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
						   bool use_large_guard)
{
	if (use_large_guard) {
		/* Switch to use a large MPU guard if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) {
			/* Default guard size is used. Update required. */
			thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start +=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start +=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size -=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	} else {
		/* Switch to use the default MPU guard size if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
			/* Large guard size is used. Update required. */
			thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start -=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start -=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size +=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	}
}

/*
 * Adjust the MPU stack guard size together with the FPU
 * policy and the stack_info values for the thread that is
 * being switched in.
 */
uint32_t z_arm_mpu_stack_guard_and_fpu_adjust(struct k_thread *thread)
{
	if (((thread->base.user_options & K_FP_REGS) != 0) ||
		((thread->arch.mode_exc_return & EXC_RETURN_FTYPE) == 0)) {
		/* The thread has been pre-tagged (at creation or later) with
		 * K_FP_REGS, i.e. it is expected to be using the FPU registers
		 * (if not already). Activate lazy stacking and program a large
		 * MPU guard to safely detect privileged thread stack
		 * overflows.
		 *
		 * OR
		 * The thread is not pre-tagged with K_FP_REGS, but it has
		 * generated an FP context. Activate lazy stacking and
		 * program a large MPU guard to detect privileged thread
		 * stack overflows.
		 */
		FPU->FPCCR |= FPU_FPCCR_LSPEN_Msk;

		z_arm_thread_stack_info_adjust(thread, true);

		/* Tag the thread with K_FP_REGS */
		thread->base.user_options |= K_FP_REGS;

		return MPU_GUARD_ALIGN_AND_SIZE_FLOAT;
	}

	/* Thread is not pre-tagged with K_FP_REGS, and it has
	 * not been using the FPU. Since there is no active FPU
	 * context, de-activate lazy stacking and program the
	 * default MPU guard size.
	 */
	FPU->FPCCR &= (~FPU_FPCCR_LSPEN_Msk);

	z_arm_thread_stack_info_adjust(thread, false);

	return MPU_GUARD_ALIGN_AND_SIZE;
}
#endif

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{

	/* Set up privileged stack before entering user mode */
	_current->arch.priv_stack_start =
		(uint32_t)z_priv_stack_find(_current->stack_obj);
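	/* The privileged stack returned by z_priv_stack_find() is a
	 * separate stack paired with this thread's stack object; it hosts
	 * system calls and (below) the MPU stack guard for this thread.
	 */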
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode, which means the guard area is no
	 * longer used here; it is instead moved to the privileged stack
	 * to catch stack overflows there. Undo the adjustments that
	 * accounted for memory borrowed from the thread stack.
	 */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
		_current->stack_info.start -= FP_GUARD_EXTRA_SIZE;
		_current->stack_info.size += FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
	_current->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	_current->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

	/* A stack guard area is reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	_current->arch.priv_stack_start +=
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	_current->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

#if defined(CONFIG_CPU_CORTEX_R)
	_current->arch.priv_stack_end =
		_current->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif

	z_arm_userspace_enter(user_entry, p1, p2, p3,
			      (uint32_t)_current->stack_info.start,
			      _current->stack_info.size -
			      _current->stack_info.delta);
	CODE_UNREACHABLE;
}

#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/*
 * @brief Configure ARM built-in stack guard
 *
 * This function configures per thread stack guards by reprogramming
 * the built-in Process Stack Pointer Limit Register (PSPLIM).
 * The functionality is meant to be used during context switch.
 *
 * @param thread thread info data structure.
 */
void configure_builtin_stack_guard(struct k_thread *thread)
{
#if defined(CONFIG_USERSPACE)
	if ((thread->arch.mode & CONTROL_nPRIV_Msk) != 0) {
		/* Only configure the stack limit for threads in privileged
		 * mode (i.e. supervisor threads or user threads doing system
		 * calls). User threads executing in user mode do not require
		 * stack limit protection.
		 */
		__set_PSPLIM(0);
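		/* A PSPLIM value of 0 effectively disables the hardware
		 * limit check, since the PSP can never drop below address 0.
		 */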
		return;
	}
	/* Only configure PSPLIM to guard the privileged stack area, if
	 * the thread is currently using it, otherwise guard the default
	 * thread stack. Note that the conditional check relies on the
	 * thread privileged stack being allocated in a higher memory area
	 * than the default thread stack (ensured by design).
	 */
	uint32_t guard_start =
		((thread->arch.priv_stack_start) &&
		(__get_PSP() >= thread->arch.priv_stack_start)) ?
		(uint32_t)thread->arch.priv_stack_start :
		(uint32_t)thread->stack_obj;

	__ASSERT(thread->stack_info.start == ((uint32_t)thread->stack_obj),
		 "stack_info.start does not point to the start of the "
		 "thread allocated area.");
#else
	uint32_t guard_start = thread->stack_info.start;
#endif
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(guard_start);
#else
#error "Built-in PSP limit checks not supported by HW"
#endif
}
#endif /* CONFIG_BUILTIN_STACK_GUARD */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)

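/* A guard violation is flagged either when a valid faulting address lies
 * inside the guard area and the PSP has also dropped into (or below) it,
 * or, when no fault address is available (pure stacking error), when the
 * PSP alone points into or below the guard area.
 */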
#define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr) \
	((fault_addr != -EINVAL) ? \
		((fault_addr >= guard_start) && \
		(fault_addr < (guard_start + guard_len)) && \
		(stack_ptr < (guard_start + guard_len))) \
		: \
		(stack_ptr < (guard_start + guard_len)))

/**
 * @brief Assess occurrence of current thread's stack corruption
 *
 * This function assesses whether a memory fault (on a given memory
 * address) is the result of stack memory corruption of the current
 * thread.
 *
 * Thread stack corruption for supervisor threads or user threads in
 * privileged mode (when User Space is supported) is reported upon an
 * attempt to access the stack guard area (if the MPU Stack Guard feature
 * is supported). Additionally the current PSP (process stack pointer)
 * must be pointing inside or below the guard area.
 *
 * Thread stack corruption for user threads in user mode is reported,
 * if the current PSP is pointing below the start of the current
 * thread's stack.
 *
 * Notes:
 * - we assume a fully descending stack,
 * - we assume a stacking error has occurred,
 * - the function shall be called when handling MemManage and Bus fault,
 *   and only if a Stacking error has been reported.
 *
 * If stack corruption is detected, the function returns the lowest
 * allowed address where the Stack Pointer can safely point to, to
 * prevent errors when un-stacking the corrupted stack frame upon
 * exception return.
 *
 * @param fault_addr memory address on which memory access violation
 *                   has been reported. It can be invalid (-EINVAL),
 *                   if only a Stacking error has been reported.
 * @param psp        current address the PSP points to
 *
 * @return The lowest allowed stack frame pointer, if the error is a
 *         thread stack corruption, otherwise 0.
 */
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
	const struct k_thread *thread = _current;

	if (thread == NULL) {
		return 0;
	}
#endif

#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
	defined(CONFIG_MPU_STACK_GUARD)
	uint32_t guard_len =
		((_current->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	/* If MPU_STACK_GUARD is not enabled, the guard length is
	 * effectively zero. Stack overflows may be detected only
	 * for user threads in nPRIV mode.
	 */
	uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if defined(CONFIG_USERSPACE)
	if (thread->arch.priv_stack_start) {
		/* User thread */
		if (z_arm_thread_is_in_user_mode() == false) {
			/* User thread in privileged mode */
			if (IS_MPU_GUARD_VIOLATION(
				thread->arch.priv_stack_start - guard_len,
				guard_len,
				fault_addr, psp)) {
				/* Thread's privileged stack corruption */
				return thread->arch.priv_stack_start;
			}
		} else {
			if (psp < (uint32_t)thread->stack_obj) {
				/* Thread's user stack corruption */
				return (uint32_t)thread->stack_obj;
			}
		}
	} else {
		/* Supervisor thread */
		if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start -
				guard_len,
				guard_len,
				fault_addr, psp)) {
			/* Supervisor thread stack corruption */
			return thread->stack_info.start;
		}
	}
#else /* CONFIG_USERSPACE */
#if defined(CONFIG_MULTITHREADING)
	if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return thread->stack_info.start;
	}
#else
	if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return (uint32_t)Z_THREAD_STACK_BUFFER(z_main_stack);
	}
#endif
#endif /* CONFIG_USERSPACE */

	return 0;
}
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	if (thread != _current) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Disable all floating point capabilities for the thread */

	/* The K_FP_REGS flag is used in SWAP and the stack check fail path.
	 * Locking interrupts here prevents a possible context switch or MPU
	 * fault from taking an outdated thread user_options flag into
	 * account.
	 */
	int key = arch_irq_lock();

	thread->base.user_options &= ~K_FP_REGS;

	__set_CONTROL(__get_CONTROL() & (~CONTROL_FPCA_Msk));

	/* No need to add an ISB barrier after setting the CONTROL
	 * register; arch_irq_unlock() already adds one.
	 */

	arch_irq_unlock(key);

	return 0;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* Dynamically enabling FP register access is not supported by this
	 * architecture port.
	 */
	return -ENOTSUP;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

/* Internal function for Cortex-M initialization,
 * applicable to either case of running Zephyr
 * with or without multi-threading support.
 */
static void z_arm_prepare_switch_to_main(void)
{
#if defined(CONFIG_FPU)
	/* Initialize the Floating Point Status and Control Register when in
	 * Unshared FP Registers mode (In Shared FP Registers mode, FPSCR is
	 * initialized at thread creation for threads that make use of the FP).
	 */
	__set_FPSCR(0);
#if defined(CONFIG_FPU_SHARING)
	/* In Sharing mode clearing FPSCR may set the CONTROL.FPCA flag. */
	__set_CONTROL(__get_CONTROL() & (~(CONTROL_FPCA_Msk)));
	__ISB();
#endif /* CONFIG_FPU_SHARING */
#endif /* CONFIG_FPU */
}

void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
				k_thread_entry_t _main)
{
	z_arm_prepare_switch_to_main();

	_current = main_thread;

#if defined(CONFIG_THREAD_LOCAL_STORAGE) && defined(CONFIG_CPU_CORTEX_M)
	/* On Cortex-M, TLS uses a global variable as pointer to
	 * the thread local storage area. So this needs to point
	 * to the main thread's TLS area before switching to any
	 * thread for the first time, as the pointer is only set
	 * during context switching.
	 */
	extern uintptr_t z_arm_tls_ptr;

	z_arm_tls_ptr = main_thread->tls;
#endif

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
	z_thread_mark_switched_in();
#endif

	/* the ready queue cache already contains the main thread */

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)
	/*
	 * If stack protection is enabled, make sure to set it
	 * before jumping to thread entry function
	 */
	z_arm_configure_dynamic_mpu_regions(main_thread);
#endif

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	/* Set PSPLIM register for built-in stack guarding of main thread. */
#if defined(CONFIG_CPU_CORTEX_M_HAS_SPLIM)
	__set_PSPLIM(main_thread->stack_info.start);
#else
#error "Built-in PSP limit checks not supported by HW"
#endif
#endif /* CONFIG_BUILTIN_STACK_GUARD */

	/*
	 * Set PSP to the highest address of the main stack
	 * before enabling interrupts and jumping to main.
	 */
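	/* z_thread_entry() is reached by a plain branch below (not an
	 * exception return), so its arguments are passed per the AAPCS:
	 * _main in r0 and zeros in r1-r3.
	 */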
	__asm__ volatile (
	"mov   r0,  %0\n\t"	/* Store _main in R0 */
#if defined(CONFIG_CPU_CORTEX_M)
	"msr   PSP, %1\n\t"	/* __set_PSP(stack_ptr) */
#endif

	"movs r1, #0\n\t"
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE) \
			|| defined(CONFIG_ARMV7_R)
	"cpsie i\n\t"		/* __enable_irq() */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	"cpsie if\n\t"		/* __enable_irq(); __enable_fault_irq() */
	"msr   BASEPRI, r1\n\t"	/* __set_BASEPRI(0) */
#else
#error Unknown ARM architecture
#endif /* CONFIG_ARMV6_M_ARMV8_M_BASELINE */
	"isb\n\t"
	"movs r2, #0\n\t"
	"movs r3, #0\n\t"
	"bl z_thread_entry\n\t"	/* z_thread_entry(_main, 0, 0, 0); */
	:
	: "r" (_main), "r" (stack_ptr)
	: "r0" /* not to be overwritten by msr PSP, %1 */
	);

	CODE_UNREACHABLE;
}

#if !defined(CONFIG_MULTITHREADING) && defined(CONFIG_CPU_CORTEX_M)

FUNC_NORETURN void z_arm_switch_to_main_no_multithreading(
	k_thread_entry_t main_entry, void *p1, void *p2, void *p3)
{
	z_arm_prepare_switch_to_main();

	/* Set PSP to the highest address of the main stack. */
	char *psp = Z_THREAD_STACK_BUFFER(z_main_stack) +
		    K_THREAD_STACK_SIZEOF(z_main_stack);

#if defined(CONFIG_BUILTIN_STACK_GUARD)
	char *psplim = (Z_THREAD_STACK_BUFFER(z_main_stack));
	/* Clear PSPLIM before setting it to guard the main stack area. */
	__set_PSPLIM(0);
#endif

	/* Store all required input in registers, to be accessible
	 * after stack pointer change. The function is not going
	 * to return, so callee-saved registers do not need to be
	 * stacked.
	 */
	register void *p1_inreg __asm__("r0") = p1;
	register void *p2_inreg __asm__("r1") = p2;
	register void *p3_inreg __asm__("r2") = p3;

	__asm__ volatile (
#ifdef CONFIG_BUILTIN_STACK_GUARD
	"msr  PSPLIM, %[_psplim]\n\t"
#endif
	"msr  PSP, %[_psp]\n\t"		/* __set_PSP(psp) */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	"cpsie i\n\t"			/* enable_irq() */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	"cpsie if\n\t"			/* __enable_irq(); __enable_fault_irq() */
	"mov r3, #0\n\t"
	"msr BASEPRI, r3\n\t"		/* __set_BASEPRI(0) */
#endif
	"isb\n\t"
	"blx  %[_main_entry]\n\t"	/* main_entry(p1, p2, p3) */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
	"cpsid i\n\t"			/* disable_irq() */
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	"msr BASEPRI, %[basepri]\n\t"	/* __set_BASEPRI(_EXC_IRQ_DEFAULT_PRIO) */
	"isb\n\t"
#endif
	"loop: b loop\n\t"		/* while (true); */
	:
	: "r" (p1_inreg), "r" (p2_inreg), "r" (p3_inreg),
	  [_psp]"r" (psp), [_main_entry]"r" (main_entry)
#if defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
	, [basepri] "r" (_EXC_IRQ_DEFAULT_PRIO)
#endif
#ifdef CONFIG_BUILTIN_STACK_GUARD
	, [_psplim]"r" (psplim)
#endif
	:
	);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING && CONFIG_CPU_CORTEX_M */