/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARM64 Cortex-A
 *
 * Core thread related primitives for the ARM64 Cortex-A
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/arch/cpu.h>

/*
 * Note about stack usage:
 *
 * [ see also comments in include/arch/arm64/thread_stack.h ]
 *
 * - kernel threads are running in EL1 using SP_EL1 as stack pointer during
 *   normal execution and during exceptions. They are by definition already
 *   running in a privileged stack that is their own.
 *
 * - user threads are running in EL0 using SP_EL0 as stack pointer during
 *   normal execution. When an exception is taken or a syscall is called the
 *   stack pointer switches to SP_EL1 and the execution starts using the
 *   privileged portion of the user stack without touching SP_EL0. This portion
 *   is marked as not user accessible in the MMU/MPU.
 *
 * - a stack guard region is added below the kernel stack when
 *   ARM64_STACK_PROTECTION is enabled. In this case, SP_EL0 always points
 *   to the safe exception stack in the kernel space. For a kernel thread,
 *   SP_EL0 never changes and keeps pointing to the safe exception stack. For a
 *   userspace thread, SP_EL0 switches from the user stack to the safe
 *   exception stack when entering EL1, and is restored to the user stack
 *   when returning to userspace (EL0).
 *
 * Kernel threads:
 *
 * High memory addresses
 *
 *   +---------------+ <- stack_ptr
 * E |      ESF      |
 * L |<<<<<<<<<<<<<<<| <- SP_EL1
 * 1 |               |
 *   +---------------+ <- stack limit
 *   |  Stack guard  | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU)
 *   +---------------+ <- stack_obj
 *
 * Low Memory addresses
 *
 *
 * User threads:
 *
 * High memory addresses
 *
 *   +---------------+ <- stack_ptr
 * E |               |
 * L |<<<<<<<<<<<<<<<| <- SP_EL0
 * 0 |               |
 *   +---------------+ ..............|
 * E |      ESF      |               | Privileged portion of the stack
 * L +>>>>>>>>>>>>>>>+ <- SP_EL1     |_ used during exceptions and syscalls
 * 1 |               |               |  of size ARCH_THREAD_STACK_RESERVED
 *   +---------------+ <- stack limit|
 *   |  Stack guard  | } Z_ARM64_STACK_GUARD_SIZE (protected by MMU/MPU)
 *   +---------------+ <- stack_obj
 *
 * Low Memory addresses
 *
 * When a kernel thread switches to user mode the SP_EL0 and SP_EL1
 * values are reset accordingly in arch_user_mode_enter().
 */
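
/*
 * For illustration only, a minimal sketch (not part of this file's logic) of
 * how the two layouts above come into play from application code, assuming
 * the standard Zephyr thread APIs; the identifiers app_stack, app_thread and
 * app_entry are hypothetical. A thread created without the K_USER option
 * keeps the kernel layout, while a K_USER thread is routed through
 * arch_user_mode_enter() below before reaching its entry point in EL0:
 *
 *   K_THREAD_STACK_DEFINE(app_stack, 4096);
 *   struct k_thread app_thread;
 *
 *   static void app_entry(void *p1, void *p2, void *p3)
 *   {
 *           // runs in EL0 when created with K_USER, in EL1 otherwise
 *   }
 *
 *   k_thread_create(&app_thread, app_stack, K_THREAD_STACK_SIZEOF(app_stack),
 *                   app_entry, NULL, NULL, NULL,
 *                   K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);
 */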

#ifdef CONFIG_USERSPACE
static bool is_user(struct k_thread *thread)
{
	return (thread->base.user_options & K_USER) != 0;
}
#endif

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	extern void z_arm64_exit_exc(void);
	struct arch_esf *pInitCtx;

	/*
	 * Clear thread->arch to avoid inheriting stale state when the thread
	 * object is reused.
	 */
	memset(&thread->arch, 0, sizeof(thread->arch));

	/*
	 * The ESF is now hosted at the top of the stack. For user threads this
	 * is also fine because at this stage they are still running in EL1.
	 * The context will be relocated by arch_user_mode_enter() before
	 * dropping into EL0.
	 */

	pInitCtx = Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr);

	pInitCtx->x0 = (uint64_t)entry;
	pInitCtx->x1 = (uint64_t)p1;
	pInitCtx->x2 = (uint64_t)p2;
	pInitCtx->x3 = (uint64_t)p3;

	/*
	 * - ELR_ELn: to be used by eret in z_arm64_exit_exc() to return
	 *   to z_thread_entry() with entry in x0(entry_point) and the
	 *   parameters already in place in x1(arg1), x2(arg2), x3(arg3).
	 * - SPSR_ELn: to enable IRQs (we are masking FIQs).
	 */
#ifdef CONFIG_USERSPACE
	/*
	 * If the new thread is a user thread we jump into
	 * arch_user_mode_enter() while still in EL1.
	 */
	if (is_user(thread)) {
		pInitCtx->elr = (uint64_t)arch_user_mode_enter;
	} else {
		pInitCtx->elr = (uint64_t)z_thread_entry;
	}

#else
	pInitCtx->elr = (uint64_t)z_thread_entry;
#endif

	/* Keep using SP_EL1 */
	pInitCtx->spsr = SPSR_MODE_EL1H | DAIF_FIQ_BIT;

	/* thread birth happens through the exception return path */
	thread->arch.exception_depth = 1;

	/*
	 * We are saving SP_EL1 here so that z_arm64_exit_exc() can pop the
	 * entry point and parameters from the ESF. For user threads the
	 * definitive location of SP_EL1 will be set in arch_user_mode_enter().
	 */
	thread->callee_saved.sp_elx = (uint64_t)pInitCtx;
	thread->callee_saved.lr = (uint64_t)z_arm64_exit_exc;

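	/* The thread pointer itself is used as the switch handle consumed by arch_switch() */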
	thread->switch_handle = thread;
#if defined(CONFIG_ARM64_STACK_PROTECTION)
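	/* The usable stack begins right above the MMU/MPU guard region */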
	thread->arch.stack_limit = (uint64_t)stack + Z_ARM64_STACK_GUARD_SIZE;
	z_arm64_thread_mem_domains_init(thread);
#endif
}

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	uintptr_t stack_el0, stack_el1;
	uint64_t tmpreg;

	/* Map the thread stack */
	z_arm64_thread_mem_domains_init(arch_current_thread());

	/* Top of the user stack area */
	stack_el0 = Z_STACK_PTR_ALIGN(arch_current_thread()->stack_info.start +
				      arch_current_thread()->stack_info.size -
				      arch_current_thread()->stack_info.delta);

	/* Top of the privileged non-user-accessible part of the stack */
	stack_el1 = (uintptr_t)(arch_current_thread()->stack_obj + ARCH_THREAD_STACK_RESERVED);

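	/*
	 * Bind the entry point and its three arguments to x0-x3 so they are
	 * already in the registers z_thread_entry() expects once we eret to EL0.
	 */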
	register void *x0 __asm__("x0") = user_entry;
	register void *x1 __asm__("x1") = p1;
	register void *x2 __asm__("x2") = p2;
	register void *x3 __asm__("x3") = p3;

	/* we don't want to be disturbed when playing with SPSR and ELR */
	arch_irq_lock();

	/* set up and drop into EL0 */
	__asm__ volatile (
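	/* Flag in TPIDRRO_EL0 that this thread now executes in EL0 */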
182 "mrs %[tmp], tpidrro_el0\n\t"
183 "orr %[tmp], %[tmp], %[is_usermode_flag]\n\t"
184 "msr tpidrro_el0, %[tmp]\n\t"
185 "msr elr_el1, %[elr]\n\t"
186 "msr spsr_el1, %[spsr]\n\t"
187 "msr sp_el0, %[sp_el0]\n\t"
188 "mov sp, %[sp_el1]\n\t"
189 "eret"
190 : [tmp] "=&r" (tmpreg)
191 : "r" (x0), "r" (x1), "r" (x2), "r" (x3),
192 [is_usermode_flag] "i" (TPIDRROEL0_IN_EL0),
193 [elr] "r" (z_thread_entry),
194 [spsr] "r" (DAIF_FIQ_BIT | SPSR_MODE_EL0T),
195 [sp_el0] "r" (stack_el0),
196 [sp_el1] "r" (stack_el1)
197 : "memory");
198
199 CODE_UNREACHABLE;
200 }
201 #endif
202