/*
 * Copyright (c) 2013-2014 Wind River Systems, Inc.
 * Copyright (c) 2021 Lexmark International, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARM Cortex-A and Cortex-R
 *
 * Core thread related primitives for the ARM Cortex-A and
 * Cortex-R processor architecture.
 */

#include <zephyr/kernel.h>
#include <zephyr/llext/symbol.h>
#include <ksched.h>
#include <zephyr/sys/barrier.h>
#include <stdbool.h>
#include <cmsis_core.h>

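/* When lazy FP stacking is enabled, the FPU hardware may push an FP context
 * past the default MPU guard without a prior data write, so a larger guard
 * is needed; FP_GUARD_EXTRA_SIZE is the extra room required on top of the
 * default guard (zero when no larger guard is configured).
 */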
#if (MPU_GUARD_ALIGN_AND_SIZE_FLOAT > MPU_GUARD_ALIGN_AND_SIZE)
#define FP_GUARD_EXTRA_SIZE	(MPU_GUARD_ALIGN_AND_SIZE_FLOAT - \
				 MPU_GUARD_ALIGN_AND_SIZE)
#else
#define FP_GUARD_EXTRA_SIZE	0
#endif

#ifndef EXC_RETURN_FTYPE
/* bit [4] allocate stack for floating-point context: 0=done 1=skipped  */
#define EXC_RETURN_FTYPE           (0x00000010UL)
#endif

/* Default last octet of EXC_RETURN, for threads that have not run yet.
 * The full EXC_RETURN value will be e.g. 0xFFFFFFFD.
 */
#define DEFAULT_EXC_RETURN 0xFD

/* An initial context, to be "restored" by z_arm_pendsv(), is put at the other
 * end of the stack, and thus that space is reusable by the stack once it is
 * no longer needed.
 *
 * The initial context is an exception stack frame (ESF) since exiting the
 * PendSV exception will want to pop an ESF. Interestingly, even though the
 * lsb of an instruction address to jump to must always be set since the CPU
 * always runs in Thumb mode, the ESF expects the real address of the
 * instruction, with the lsb *not* set (instructions are always aligned on
 * 16 bit halfwords). Since the compiler automatically sets the lsb of
 * function addresses, we have to unset it manually before storing it in the
 * 'pc' field of the ESF.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct __basic_sf *iframe;

#ifdef CONFIG_MPU_STACK_GUARD
#if defined(CONFIG_USERSPACE)
	if (z_stack_is_user_capable(stack)) {
		/* Guard area is carved out of the buffer instead of reserved
		 * for stacks that can host user threads
		 */
		thread->stack_info.start += MPU_GUARD_ALIGN_AND_SIZE;
		thread->stack_info.size -= MPU_GUARD_ALIGN_AND_SIZE;
	}
#endif /* CONFIG_USERSPACE */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		/* A larger guard is needed because lazy stacking of FP regs
		 * may overshoot the guard area without writing anything. We
		 * carve it out of the stack buffer as-needed instead of
		 * unconditionally reserving it.
		 */
		thread->stack_info.start += FP_GUARD_EXTRA_SIZE;
		thread->stack_info.size -= FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
#endif /* CONFIG_MPU_STACK_GUARD */

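	/* Carve one basic stack frame off the top (highest address) of the
	 * stack; this is the initial context the context-restore path will
	 * pop when the thread first runs.
	 */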
	iframe = Z_STACK_PTR_TO_FRAME(struct __basic_sf, stack_ptr);
#if defined(CONFIG_USERSPACE)
	if ((thread->base.user_options & K_USER) != 0) {
		iframe->pc = (uint32_t)arch_user_mode_enter;
	} else {
		iframe->pc = (uint32_t)z_thread_entry;
	}
#else
	iframe->pc = (uint32_t)z_thread_entry;
#endif

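	/* The entry wrapper receives the thread entry point and its three
	 * parameters in the first four argument registers.
	 */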
	iframe->a1 = (uint32_t)entry;
	iframe->a2 = (uint32_t)p1;
	iframe->a3 = (uint32_t)p2;
	iframe->a4 = (uint32_t)p3;

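	/* Initial CPSR: System mode with asynchronous aborts masked; data
	 * endianness and Thumb state bits are added conditionally below.
	 */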
	iframe->xpsr = A_BIT | MODE_SYS;
#if defined(CONFIG_BIG_ENDIAN)
	iframe->xpsr |= E_BIT;
#endif /* CONFIG_BIG_ENDIAN */

#if defined(CONFIG_COMPILER_ISA_THUMB2)
	iframe->xpsr |= T_BIT;
#endif /* CONFIG_COMPILER_ISA_THUMB2 */

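	/* For FPU sharing, reserve and zero an FPU context area just below
	 * the basic frame; the saved stack pointer then points at the FP
	 * context, which is what the context-restore path expects for
	 * FPU-sharing threads.
	 */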
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	iframe = (struct __basic_sf *)
		((uintptr_t)iframe - sizeof(struct __fpu_sf));
	memset(iframe, 0, sizeof(struct __fpu_sf));
#endif

	thread->callee_saved.psp = (uint32_t)iframe;
	thread->arch.basepri = 0;

#if defined(CONFIG_ARM_STORE_EXC_RETURN) || defined(CONFIG_USERSPACE)
	thread->arch.mode = 0;
#if defined(CONFIG_ARM_STORE_EXC_RETURN)
	thread->arch.mode_exc_return = DEFAULT_EXC_RETURN;
#endif
#if FP_GUARD_EXTRA_SIZE > 0
	if ((thread->base.user_options & K_FP_REGS) != 0) {
		thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
	}
#endif
#if defined(CONFIG_USERSPACE)
	thread->arch.priv_stack_start = 0;
#endif
#endif
	/*
	 * initial values in all other registers/thread entries are
	 * irrelevant.
	 */
#if defined(CONFIG_USE_SWITCH)
	extern void z_arm_cortex_ar_exit_exc(void);
	thread->switch_handle = thread;
	/* thread birth happens through the exception return path */
	thread->arch.exception_depth = 1;
	thread->callee_saved.lr = (uint32_t)z_arm_cortex_ar_exit_exc;
#endif
}

#if defined(CONFIG_MPU_STACK_GUARD) && defined(CONFIG_FPU) \
	&& defined(CONFIG_FPU_SHARING)

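/* Switch the given thread between the default MPU guard size and the larger
 * guard required when lazy FP stacking is in use, adjusting the reported
 * stack_info (or the privileged stack start, for user threads) to account
 * for the memory borrowed from the stack buffer.
 */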
static inline void z_arm_thread_stack_info_adjust(struct k_thread *thread,
	bool use_large_guard)
{
	if (use_large_guard) {
		/* Switch to use a large MPU guard if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) == 0) {
			/* Default guard size is used. Update required. */
			thread->arch.mode |= Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start +=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start +=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size -=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	} else {
		/* Switch to use the default MPU guard size if not already. */
		if ((thread->arch.mode &
			Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
			/* Large guard size is used. Update required. */
			thread->arch.mode &= ~Z_ARM_MODE_MPU_GUARD_FLOAT_Msk;
#if defined(CONFIG_USERSPACE)
			if (thread->arch.priv_stack_start) {
				/* User thread */
				thread->arch.priv_stack_start -=
					FP_GUARD_EXTRA_SIZE;
			} else
#endif /* CONFIG_USERSPACE */
			{
				/* Privileged thread */
				thread->stack_info.start -=
					FP_GUARD_EXTRA_SIZE;
				thread->stack_info.size +=
					FP_GUARD_EXTRA_SIZE;
			}
		}
	}
}

#endif

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{

	/* Set up privileged stack before entering user mode */
	arch_current_thread()->arch.priv_stack_start =
		(uint32_t)z_priv_stack_find(arch_current_thread()->stack_obj);
#if defined(CONFIG_MPU_STACK_GUARD)
#if defined(CONFIG_THREAD_STACK_INFO)
	/* We're dropping to user mode, which means the guard area is no
	 * longer used here; it is instead moved to the privileged stack
	 * to catch stack overflows there. Undo the calculations that
	 * accounted for memory borrowed from the thread stack.
	 */
#if FP_GUARD_EXTRA_SIZE > 0
	if ((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) {
		arch_current_thread()->stack_info.start -= FP_GUARD_EXTRA_SIZE;
		arch_current_thread()->stack_info.size += FP_GUARD_EXTRA_SIZE;
	}
#endif /* FP_GUARD_EXTRA_SIZE */
	arch_current_thread()->stack_info.start -= MPU_GUARD_ALIGN_AND_SIZE;
	arch_current_thread()->stack_info.size += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_THREAD_STACK_INFO */

	/* Stack guard area reserved at the bottom of the thread's
	 * privileged stack. Adjust the available (writable) stack
	 * buffer area accordingly.
	 */
#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	arch_current_thread()->arch.priv_stack_start +=
		((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	arch_current_thread()->arch.priv_stack_start += MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
#endif /* CONFIG_MPU_STACK_GUARD */

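	/* Cortex-R additionally records the end (highest address) of the
	 * privileged stack: priv_stack_start plus the fixed
	 * CONFIG_PRIVILEGED_STACK_SIZE area.
	 */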
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
	arch_current_thread()->arch.priv_stack_end =
		arch_current_thread()->arch.priv_stack_start + CONFIG_PRIVILEGED_STACK_SIZE;
#endif

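	/* Drop to user mode on the adjusted user stack; this call does not
	 * return.
	 */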
	z_arm_userspace_enter(user_entry, p1, p2, p3,
			     (uint32_t)arch_current_thread()->stack_info.start,
			     arch_current_thread()->stack_info.size -
			     arch_current_thread()->stack_info.delta);
	CODE_UNREACHABLE;
}

bool z_arm_thread_is_in_user_mode(void)
{
	uint32_t value;

	/*
	 * On Cortex-A and Cortex-R, the mode (lower 5) bits of the CPSR
	 * read 0x10 when executing in User mode.
	 */
	value = __get_CPSR();
	return ((value & CPSR_M_Msk) == CPSR_M_USR);
}
EXPORT_SYMBOL(z_arm_thread_is_in_user_mode);
#endif

#if defined(CONFIG_MPU_STACK_GUARD) || defined(CONFIG_USERSPACE)

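/* A guard violation is reported when the fault address (if one was captured)
 * falls inside the guard region and the stack pointer has descended into or
 * below the guard; when no valid fault address is available (-EINVAL, e.g. a
 * pure stacking error), the stack pointer check alone decides.
 */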
#define IS_MPU_GUARD_VIOLATION(guard_start, guard_len, fault_addr, stack_ptr) \
	((fault_addr != -EINVAL) ? \
	((fault_addr >= guard_start) && \
	(fault_addr < (guard_start + guard_len)) && \
	(stack_ptr < (guard_start + guard_len))) \
	: \
	(stack_ptr < (guard_start + guard_len)))

/**
 * @brief Assess occurrence of current thread's stack corruption
 *
 * This function assesses whether a memory fault (on a given memory
 * address) is the result of stack memory corruption of the current
 * thread.
 *
 * Thread stack corruption for supervisor threads or user threads in
 * privileged mode (when User Space is supported) is reported upon an
 * attempt to access the stack guard area (if the MPU Stack Guard feature
 * is supported). Additionally, the current PSP (process stack pointer)
 * must be pointing inside or below the guard area.
 *
 * Thread stack corruption for user threads in user mode is reported,
 * if the current PSP is pointing below the start of the current
 * thread's stack.
 *
 * Notes:
 * - we assume a fully descending stack,
 * - we assume a stacking error has occurred,
 * - the function shall be called when handling MemManage and Bus fault,
 *   and only if a Stacking error has been reported.
 *
 * If stack corruption is detected, the function returns the lowest
 * allowed address where the Stack Pointer can safely point to, to
 * prevent errors when unstacking the corrupted stack frame upon
 * exception return.
 *
 * @param fault_addr memory address on which memory access violation
 *                   has been reported. It can be invalid (-EINVAL),
 *                   if only a Stacking error has been reported.
 * @param psp        current address the PSP points to
 *
 * @return The lowest allowed stack frame pointer, if error is a
 *         thread stack corruption, otherwise return 0.
 */
uint32_t z_check_thread_stack_fail(const uint32_t fault_addr, const uint32_t psp)
{
#if defined(CONFIG_MULTITHREADING)
	const struct k_thread *thread = arch_current_thread();

	if (thread == NULL) {
		return 0;
	}
#endif

#if (defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)) && \
	defined(CONFIG_MPU_STACK_GUARD)
	uint32_t guard_len =
		((arch_current_thread()->arch.mode & Z_ARM_MODE_MPU_GUARD_FLOAT_Msk) != 0) ?
		MPU_GUARD_ALIGN_AND_SIZE_FLOAT : MPU_GUARD_ALIGN_AND_SIZE;
#else
	/* If MPU_STACK_GUARD is not enabled, the guard length is
	 * effectively zero. Stack overflows may be detected only
	 * for user threads in nPRIV mode.
	 */
	uint32_t guard_len = MPU_GUARD_ALIGN_AND_SIZE;
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if defined(CONFIG_USERSPACE)
	if (thread->arch.priv_stack_start) {
		/* User thread */
		if (z_arm_thread_is_in_user_mode() == false) {
			/* User thread in privileged mode */
			if (IS_MPU_GUARD_VIOLATION(
				thread->arch.priv_stack_start - guard_len,
					guard_len,
				fault_addr, psp)) {
				/* Thread's privileged stack corruption */
				return thread->arch.priv_stack_start;
			}
		} else {
			if (psp < (uint32_t)thread->stack_obj) {
				/* Thread's user stack corruption */
				return (uint32_t)thread->stack_obj;
			}
		}
	} else {
		/* Supervisor thread */
		if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start -
				guard_len,
				guard_len,
				fault_addr, psp)) {
			/* Supervisor thread stack corruption */
			return thread->stack_info.start;
		}
	}
#else /* CONFIG_USERSPACE */
#if defined(CONFIG_MULTITHREADING)
	if (IS_MPU_GUARD_VIOLATION(thread->stack_info.start - guard_len,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return thread->stack_info.start;
	}
#else
	if (IS_MPU_GUARD_VIOLATION((uint32_t)z_main_stack,
			guard_len,
			fault_addr, psp)) {
		/* Thread stack corruption */
		return (uint32_t)K_THREAD_STACK_BUFFER(z_main_stack);
	}
#endif
#endif /* CONFIG_USERSPACE */

	return 0;
}
#endif /* CONFIG_MPU_STACK_GUARD || CONFIG_USERSPACE */

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	if (thread != arch_current_thread()) {
		return -EINVAL;
	}

	if (arch_is_in_isr()) {
		return -EINVAL;
	}

	/* Disable all floating point capabilities for the thread */

	/* The K_FP_REGS flag is used in SWAP and stack check fail. Locking
	 * interrupts here prevents a possible context switch or MPU fault
	 * from taking an outdated thread user_options flag into account.
	 */
	int key = arch_irq_lock();

	thread->base.user_options &= ~K_FP_REGS;

	__set_FPEXC(0);

	/* No need to add an ISB barrier after clearing FPEXC;
	 * arch_irq_unlock() already adds one.
	 */

	arch_irq_unlock(key);

	return 0;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	/* This is not supported in Cortex-A and Cortex-R */
	return -ENOTSUP;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */