/*
 * Copyright (c) 2016 Jean-Paul Etienne <fractalclone@gmail.com>
 * Copyright (c) 2020 BayLibre, SAS
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <zephyr/arch/riscv/csr.h>
#include <stdio.h>
#include <pmp.h>

#ifdef CONFIG_USERSPACE
/*
 * Per-thread (TLS) variable indicating whether execution is in user mode.
 */
Z_THREAD_LOCAL uint8_t is_user_mode;
#endif

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	extern void z_riscv_thread_start(void);
	struct arch_esf *stack_init;

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	const struct soc_esf soc_esf_init = {SOC_ESF_INIT};
#endif

	/* Initial stack frame for thread */
	stack_init = (struct arch_esf *)Z_STACK_PTR_ALIGN(
				Z_STACK_PTR_TO_FRAME(struct arch_esf, stack_ptr)
				);
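
	/*
	 * The frame sits just below the initial stack pointer:
	 * Z_STACK_PTR_TO_FRAME() reserves sizeof(struct arch_esf) under
	 * stack_ptr, and Z_STACK_PTR_ALIGN() rounds the result down to
	 * the ABI-mandated stack alignment.
	 */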

	/*
	 * Populate the initial register state: a0-a3 are restored on
	 * exception exit and become the four arguments of the function
	 * the thread starts in.
	 */
	stack_init->a0 = (unsigned long)entry;
	stack_init->a1 = (unsigned long)p1;
	stack_init->a2 = (unsigned long)p2;
	stack_init->a3 = (unsigned long)p3;

	/*
	 * Per the RISC-V architecture, the MSTATUS register (used to
	 * globally enable/disable interrupts), as well as the MEPC
	 * register (used by the core to save the program counter at
	 * which an interrupt/exception occurred), must be saved on the
	 * stack upon an interrupt/exception and restored prior to
	 * returning from it. This is what allows nested interrupts to
	 * be handled.
	 *
	 * Given that thread startup happens through the exception exit
	 * path, initially set:
	 * 1) MSTATUS to MSTATUS_DEF_RESTORE in the thread stack to enable
	 *    interrupts when the newly created thread is scheduled;
	 * 2) MEPC to the address of z_thread_entry in the thread stack.
	 *
	 * Hence, on exit from the interrupt/exception/context-switch that
	 * schedules the newly created thread:
	 * 1) interrupts will be enabled, as MSTATUS is restored from the
	 *    value set within the thread stack;
	 * 2) the core will jump to z_thread_entry, as the program counter
	 *    is restored from the MEPC value set within the thread stack.
	 */
	stack_init->mstatus = MSTATUS_DEF_RESTORE;

#if defined(CONFIG_FPU_SHARING)
	/* thread birth happens through the exception return path */
	thread->arch.exception_depth = 1;
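	/*
	 * Note: mstatus.FS is deliberately left off here; with FPU
	 * sharing, a thread's FPU context is enabled lazily, on its
	 * first FPU access.
	 */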
#elif defined(CONFIG_FPU)
	/* Unshared FP mode: enable the FPU for every thread. */
	stack_init->mstatus |= MSTATUS_FS_INIT;
#endif

#if defined(CONFIG_USERSPACE)
	/* Clear user thread context */
	z_riscv_pmp_usermode_init(thread);
	thread->arch.priv_stack_start = 0;
#endif /* CONFIG_USERSPACE */

	/* Assign the thread entry point and, where needed, mstatus.MPRV. */
	if (IS_ENABLED(CONFIG_USERSPACE)
	    && (thread->base.user_options & K_USER)) {
		/* User thread */
		stack_init->mepc = (unsigned long)k_thread_user_mode_enter;

	} else {
		/* Supervisor thread */
		stack_init->mepc = (unsigned long)z_thread_entry;

#if defined(CONFIG_PMP_STACK_GUARD)
		/* Set mstatus.MPRV so that PMP checks also apply to this
		 * supervisor thread while it runs in machine mode.
		 */
		stack_init->mstatus |= MSTATUS_MPRV;
#endif /* CONFIG_PMP_STACK_GUARD */
	}

#if defined(CONFIG_PMP_STACK_GUARD)
	/* Set up the thread's PMP stack-guard regions. */
	z_riscv_pmp_stackguard_prepare(thread);
#endif /* CONFIG_PMP_STACK_GUARD */

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
	stack_init->soc_context = soc_esf_init;
#endif

#ifdef CONFIG_RISCV_SOC_HAS_ISR_STACKING
	SOC_ISR_STACKING_ESR_INIT;
#endif

#ifdef CONFIG_CLIC_SUPPORT_INTERRUPT_LEVEL
	/* Clear the previous interrupt level. */
	stack_init->mcause = 0;
#endif

	thread->callee_saved.sp = (unsigned long)stack_init;

	/* where to go when returning from z_riscv_switch() */
	thread->callee_saved.ra = (unsigned long)z_riscv_thread_start;

	/* our switch handle is the thread pointer itself */
	thread->switch_handle = thread;
}
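
/*
 * First-run sequence, for reference: z_riscv_switch() restores the
 * callee-saved registers prepared above and returns through ra into
 * z_riscv_thread_start, which unwinds the struct arch_esf built here
 * and executes mret -- loading the PC from MEPC and mstatus from the
 * stacked value -- so the thread begins in z_thread_entry() (or
 * k_thread_user_mode_enter()) with interrupts enabled.
 */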

#ifdef CONFIG_USERSPACE

/*
 * User space entry function
 *
 * This function is the entry point to user mode from privileged execution.
 * The transition is one-way: threads that enter user mode do not return
 * to privileged mode, except transiently while servicing system calls.
 */
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	unsigned long top_of_user_stack, top_of_priv_stack;
	unsigned long status;

	/* Set up privileged stack */
#ifdef CONFIG_GEN_PRIV_STACKS
	_current->arch.priv_stack_start =
			(unsigned long)z_priv_stack_find(_current->stack_obj);
	/*
	 * Remove the stack guard from the main stack: once a separate
	 * privileged stack exists, the guard area reserved in the user
	 * stack object is handed back to the user stack.
	 */
	_current->stack_info.start -= K_THREAD_STACK_RESERVED;
	_current->stack_info.size += K_THREAD_STACK_RESERVED;
#else
	_current->arch.priv_stack_start = (unsigned long)_current->stack_obj;
#endif /* CONFIG_GEN_PRIV_STACKS */
	top_of_priv_stack = Z_STACK_PTR_ALIGN(_current->arch.priv_stack_start +
					      K_KERNEL_STACK_RESERVED +
					      CONFIG_PRIVILEGED_STACK_SIZE);
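
	/*
	 * The privileged stack grows down from top_of_priv_stack; it is
	 * the stack used for exception and syscall handling once this
	 * thread runs in user mode (see user_exc_sp below).
	 */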

#ifdef CONFIG_INIT_STACKS
	/* Initialize the privileged stack */
	(void)memset((void *)_current->arch.priv_stack_start, 0xaa,
		     Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE));
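	/*
	 * The 0xaa fill pattern is what makes stack-usage measurement
	 * possible: z_stack_space_get(), reached via
	 * arch_thread_priv_stack_space_get() below, counts the bytes
	 * still holding it.
	 */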
#endif /* CONFIG_INIT_STACKS */

	top_of_user_stack = Z_STACK_PTR_ALIGN(
				_current->stack_info.start +
				_current->stack_info.size -
				_current->stack_info.delta);
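
	/*
	 * stack_info.delta accounts for space already carved off the top
	 * of the stack (e.g. a TLS area or a random stack-pointer
	 * offset), so the initial user SP starts below it.
	 */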

	status = csr_read(mstatus);

	/* Set next CPU status to user mode */
	status = INSERT_FIELD(status, MSTATUS_MPP, PRV_U);
	/* Enable IRQs for user mode */
	status = INSERT_FIELD(status, MSTATUS_MPIE, 1);
	/* Disable IRQs for m-mode until the mode switch */
	status = INSERT_FIELD(status, MSTATUS_MIE, 0);

	csr_write(mstatus, status);
	csr_write(mepc, z_thread_entry);
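
	/*
	 * Recap of what mret will do with the values written above:
	 * privilege drops to MPP (user mode), MIE is reloaded from MPIE
	 * (re-enabling IRQs), and execution resumes at MEPC
	 * (z_thread_entry).
	 */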

#ifdef CONFIG_PMP_STACK_GUARD
	/* reconfigure as the kernel mode stack will be different */
	z_riscv_pmp_stackguard_prepare(_current);
#endif

	/* Set up Physical Memory Protection */
	z_riscv_pmp_usermode_prepare(_current);
	z_riscv_pmp_usermode_enable(_current);

	/* preserve stack pointer for next exception entry */
	arch_curr_cpu()->arch.user_exc_sp = top_of_priv_stack;

	is_user_mode = true;
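
	/*
	 * Setting is_user_mode here is safe even though we are still in
	 * machine mode: MIE was cleared above, so no interrupt can
	 * observe the flag before the mret below completes the switch.
	 */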

	register void *a0 __asm__("a0") = user_entry;
	register void *a1 __asm__("a1") = p1;
	register void *a2 __asm__("a2") = p2;
	register void *a3 __asm__("a3") = p3;
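
	/*
	 * The register variables above pin the entry point and its
	 * arguments into the a0-a3 argument registers so that, after
	 * mret, z_thread_entry() receives them as its four parameters;
	 * the asm itself only loads the user stack pointer.
	 */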

	__asm__ volatile (
	"mv sp, %4; mret"
	:
	: "r" (a0), "r" (a1), "r" (a2), "r" (a3), "r" (top_of_user_stack)
	: "memory");

	CODE_UNREACHABLE;
}

int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
				     size_t *unused_ptr)
{
	if ((thread->base.user_options & K_USER) != K_USER) {
		/* Only user threads have a privileged stack to measure. */
		return -EINVAL;
	}
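
	/*
	 * Report the same span that was 0xaa-filled in
	 * arch_user_mode_enter(), then let z_stack_space_get() scan it
	 * for still-untouched bytes.
	 */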

	*stack_size = Z_STACK_PTR_ALIGN(K_KERNEL_STACK_RESERVED + CONFIG_PRIVILEGED_STACK_SIZE);

	return z_stack_space_get((void *)thread->arch.priv_stack_start, *stack_size, unused_ptr);
}

#endif /* CONFIG_USERSPACE */
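
/*
 * Example (sketch): creating a user-mode thread that travels through
 * arch_new_thread() and arch_user_mode_enter() above. The stack/thread
 * object names and sizes are illustrative only.
 *
 *   K_THREAD_STACK_DEFINE(my_stack, 1024);
 *   struct k_thread my_thread;
 *
 *   k_thread_create(&my_thread, my_stack, K_THREAD_STACK_SIZEOF(my_stack),
 *                   user_fn, NULL, NULL, NULL,
 *                   K_PRIO_PREEMPT(0), K_USER, K_NO_WAIT);
 */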

#ifndef CONFIG_MULTITHREADING

K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

FUNC_NORETURN void z_riscv_switch_to_main_no_multithreading(k_thread_entry_t main_entry,
							    void *p1, void *p2, void *p3)
{
	void *main_stack;

	ARG_UNUSED(p1);
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	_kernel.cpus[0].id = 0;
	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));

	main_stack = (K_THREAD_STACK_BUFFER(z_main_stack) +
		      K_THREAD_STACK_SIZEOF(z_main_stack));

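	/*
	 * Unlocking with MSTATUS_IEN as the key restores mstatus.MIE,
	 * so interrupts are enabled before main() starts running.
	 */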
	irq_unlock(MSTATUS_IEN);

	__asm__ volatile (
	"mv sp, %0; jalr ra, %1, 0"
	:
	: "r" (main_stack), "r" (main_entry)
	: "memory");

	/* main() is not expected to return; if it does, lock IRQs and spin */
	irq_lock();
	while (true) {
	}

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */