/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/arch/riscv/csr.h>
#include <stdio.h>
#include <pmp.h>

#ifdef CONFIG_USERSPACE
/*
 * Per-thread (TLS) variable indicating whether execution is in user mode.
 */
Z_THREAD_LOCAL uint8_t is_user_mode;
#endif

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	extern void z_riscv_thread_start(void);
	struct arch_esf *stack_init;

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif

	/* Initial stack frame for thread */
	stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN(
		Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr)
	);

	/* Setup the initial stack frame */
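	/*
	 * a0-a3 are restored into the argument registers on the exception
	 * exit path, so the entry point placed in mepc below is effectively
	 * invoked as entry(p1, p2, p3) per the RISC-V calling convention.
	 */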
	stack_init->a0 = (unsigned long)entry;
	stack_init->a1 = (unsigned long)p1;
	stack_init->a2 = (unsigned long)p2;
	stack_init->a3 = (unsigned long)p3;

	/*
	 * Per the RISC-V architecture, the MSTATUS register (which globally
	 * enables/disables interrupts) and the MEPC register (in which the
	 * core saves the program counter at which an interrupt/exception
	 * occurred) must be saved on the stack upon an interrupt/exception
	 * and restored prior to returning from it; this is what makes
	 * nested interrupts possible.
	 *
	 * Given that thread startup happens through the exception exit
	 * path, initially set:
	 * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack so that
	 *    interrupts are enabled when the newly created thread is first
	 *    scheduled;
	 * 2) MEPC to the address of z_thread_entry in the thread stack.
	 *
	 * Hence, when returning from an interrupt/exception/context-switch
	 * into the newly created thread:
	 * 1) interrupts are enabled, since the MSTATUS register is restored
	 *    from the value set in the thread stack;
	 * 2) the core jumps to z_thread_entry, since the program counter is
	 *    restored from the MEPC value set in the thread stack.
	 */
	stack_init->mstatus = MSTATUS_DEF_RESTORE;

#if defined(CONFIG_FPU_SHARING)
	/* thread birth happens through the exception return path */
	thread->arch.exception_depth = 1;
#elif defined(CONFIG_FPU)
	/* Unshared FP mode: enable the FPU for every thread. */
	stack_init->mstatus |= MSTATUS_FS_INIT;
#endif

#if defined(CONFIG_USERSPACE)
	/* Clear user thread context */
	z_riscv_pmp_usermode_init(thread);
	thread->arch.priv_stack_start = 0;
#endif /* CONFIG_USERSPACE */

	/* Assign thread entry point and mstatus.MPRV mode. */
	if (IS_ENABLED(CONFIG_USERSPACE)
	    && (thread->base.user_options & K_USER)) {
		/* User thread */
		stack_init->mepc = (unsigned long)k_thread_user_mode_enter;

	} else {
		/* Supervisor thread */
		stack_init->mepc = (unsigned long)z_thread_entry;

#if defined(CONFIG_PMP_STACK_GUARD)
		/* Set mstatus.MPRV so that machine-mode loads and stores by
		 * this supervisor thread are checked against the PMP using
		 * the privilege level in mstatus.MPP, which lets the PMP
		 * stack guard catch stack overflows.
		 */
		stack_init->mstatus |= MSTATUS_MPRV;
#endif /* CONFIG_PMP_STACK_GUARD */
	}

#if defined(CONFIG_PMP_STACK_GUARD)
	/* Set up the PMP regions for this thread's stack guard. */
	z_riscv_pmp_stackguard_prepare(thread);
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	stack_init->soc_context = soc_esf_init;
#endif

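	/* Initial stack pointer for the first context switch into this thread */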
	thread->callee_saved.sp = (unsigned long)stack_init;

	/* where to go when returning from z_riscv_switch() */
	thread->callee_saved.ra = (unsigned long)z_riscv_thread_start;

	/* our switch handle is the thread pointer itself */
	thread->switch_handle = thread;
}

#ifdef CONFIG_USERSPACE

/*
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The transition is one-way: threads that enter user mode do not return to
 * privileged execution, except temporarily while performing system calls.
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	unsigned long top_of_user_stack, top_of_priv_stack;
	unsigned long status;

	/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
	arch_current_thread()->arch.priv_stack_start =
		(unsigned long)z_priv_stack_find(arch_current_thread()->stack_obj);
	/* remove the stack guard from the main stack */
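	/* The reserved guard area is only needed while this stack is used in
	 * privileged mode; the separate privileged stack set up above carries
	 * its own guard, so the area is handed back to the usable stack.
	 */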
	arch_current_thread()->stack_info.start -= K_THREAD_STACK_RESERVED;
	arch_current_thread()->stack_info.size += K_THREAD_STACK_RESERVED;
#else
	arch_current_thread()->arch.priv_stack_start =
		(unsigned long)arch_current_thread()->stack_obj;
#endif /* CONFIG_GEN_PRIV_STACKS */
	top_of_priv_stack = Z_STACK_PTR_ALIGN(arch_current_thread()->arch.priv_stack_start +
					      K_KERNEL_STACK_RESERVED +
					      CONFIG_PRIVILEGED_STACK_SIZE);

#ifdef CONFIG_INIT_STACKS
	/* Fill the privileged stack with 0xaa so stack usage can be measured */
	(void)memset((void *)arch_current_thread()->arch.priv_stack_start, 0xaa,
		     Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE));
#endif /* CONFIG_INIT_STACKS */

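	/* Initial user stack pointer: the aligned top of the user stack area,
	 * minus any space already carved from the top (stack_info.delta,
	 * e.g. for TLS or stack pointer randomization).
	 */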
	top_of_user_stack = Z_STACK_PTR_ALIGN(
				arch_current_thread()->stack_info.start +
				arch_current_thread()->stack_info.size -
				arch_current_thread()->stack_info.delta);

	status = csr_read(mstatus);

	/* Have mret return into user mode (mstatus.MPP) */
	status = INSERT_FIELD(status, MSTATUS_MPP, PRV_U);
	/* Enable IRQs for user mode */
	status = INSERT_FIELD(status, MSTATUS_MPIE, 1);
	/* Disable IRQs for m-mode until the mode switch */
	status = INSERT_FIELD(status, MSTATUS_MIE, 0);

	csr_write(mstatus, status);
	csr_write(mepc, z_thread_entry);

#ifdef CONFIG_PMP_STACK_GUARD
	/* reconfigure as the kernel mode stack will be different */
	z_riscv_pmp_stackguard_prepare(arch_current_thread());
#endif

	/* Set up Physical Memory Protection */
	z_riscv_pmp_usermode_prepare(arch_current_thread());
	z_riscv_pmp_usermode_enable(arch_current_thread());

	/* preserve stack pointer for next exception entry */
	arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;

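	/* From here on, this thread reports itself as executing in user mode,
	 * e.g. so syscall entry can tell user context from kernel context.
	 */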
	is_user_mode = true;

	register void *a0 __asm__("a0") = user_entry;
	register void *a1 __asm__("a1") = p1;
	register void *a2 __asm__("a2") = p2;
	register void *a3 __asm__("a3") = p3;

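	/*
	 * Switch to the user stack and mret: the mret loads the PC from mepc
	 * (z_thread_entry) and drops to the privilege level in mstatus.MPP
	 * (user mode), with user_entry and the parameters staged in a0-a3
	 * per the calling convention.
	 */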
	__asm__ volatile (
	"mv sp, %4; mret"
	:
	: "r" (a0), "r" (a1), "r" (a2), "r" (a3), "r" (top_of_user_stack)
	: "memory");

	CODE_UNREACHABLE;
}

int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
				     size_t *unused_ptr)
{
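	/* Only user threads have a privileged stack */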
	if ((thread->base.user_options & K_USER) != K_USER) {
		return -EINVAL;
	}

	*stack_size = Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE);

	return z_stack_space_get((void *)thread->arch.priv_stack_start, *stack_size, unused_ptr);
}

#endif /* CONFIG_USERSPACE */

#ifndef CONFIG_MULTITHREADING

K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(k_thread_entry_t main_entry,
							    void *p1, void *p2, void *p3)
{
	void *main_stack;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

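	/* Stacks grow downward: point the IRQ and main stack pointers at the
	 * top (end) of their respective stack areas.
	 */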
	_kernel.cpus[0].id = 0;
	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));

	main_stack = (K_THREAD_STACK_BUFFER(z_main_stack) +
		      K_THREAD_STACK_SIZEOF(z_main_stack));

	irq_unlock(MSTATUS_IEN);

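	/* Switch to the main stack and call main_entry() */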
	__asm__ volatile (
	"mv sp, %0; jalr ra, %1, 0"
	:
	: "r" (main_stack), "r" (main_entry)
	: "memory");

	/* should main_entry ever return: lock interrupts and spin forever */
	irq_lock();
	while (true) {
	}

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */
