/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARCv2
 *
 * Core thread related primitives for the ARCv2 processor architecture.
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <offsets_short.h>
#include <zephyr/wait_q.h>

#ifdef CONFIG_USERSPACE
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif

#if defined(CONFIG_ARC_DSP) && defined(CONFIG_ARC_DSP_SHARING)
#include <zephyr/arch/arc/v2/dsp/arc_dsp.h>
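/* Serializes arc_dsp_enable()/arc_dsp_disable() updates to thread->base.user_options */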
static struct k_spinlock lock;
#endif

/* initial stack frame */
struct init_stack_frame {
	uintptr_t pc;
#ifdef CONFIG_ARC_HAS_SECURE
	uint32_t sec_stat;
#endif
	uintptr_t status32;
	uintptr_t r3;
	uintptr_t r2;
	uintptr_t r1;
	uintptr_t r0;
};

#ifdef CONFIG_USERSPACE
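/*
 * For user threads the initial frame also carries the initial user stack
 * pointer, which z_user_thread_entry_wrapper pops off the privilege stack
 * (see get_iframe() below).
 */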
struct user_init_stack_frame {
	struct init_stack_frame iframe;
	uint32_t user_sp;
};

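/* True if the thread runs (or will drop to) user mode */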
static bool is_user(struct k_thread *thread)
{
	return (thread->base.user_options & K_USER) != 0;
}
#endif

/* Set all stack-related architecture variables for the provided thread */
static void setup_stack_vars(struct k_thread *thread)
{
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
#ifdef CONFIG_GEN_PRIV_STACKS
		thread->arch.priv_stack_start =
			(uint32_t)z_priv_stack_find(thread->stack_obj);
#else
		thread->arch.priv_stack_start = (uint32_t)(thread->stack_obj);
#endif /* CONFIG_GEN_PRIV_STACKS */
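		/*
		 * The usable privileged stack begins above the MPU stack
		 * guard area reserved at its lowest addresses.
		 */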
		thread->arch.priv_stack_start += Z_ARC_STACK_GUARD_SIZE;
	} else {
		thread->arch.priv_stack_start = 0;
	}
#endif /* CONFIG_USERSPACE */

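	/*
	 * Record the stack bounds used by ARC hardware stack checking; user
	 * threads track both the privileged (kernel) stack and the user stack.
	 */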
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
		thread->arch.k_stack_top = thread->arch.priv_stack_start;
		thread->arch.k_stack_base = (thread->arch.priv_stack_start +
					     CONFIG_PRIVILEGED_STACK_SIZE);
		thread->arch.u_stack_top = thread->stack_info.start;
		thread->arch.u_stack_base = (thread->stack_info.start +
					     thread->stack_info.size);
	} else
#endif /* CONFIG_USERSPACE */
	{
		thread->arch.k_stack_top = (uint32_t)thread->stack_info.start;
		thread->arch.k_stack_base = (uint32_t)(thread->stack_info.start +
						       thread->stack_info.size);
#ifdef CONFIG_USERSPACE
		thread->arch.u_stack_top = 0;
		thread->arch.u_stack_base = 0;
#endif /* CONFIG_USERSPACE */
	}
#endif /* CONFIG_ARC_STACK_CHECKING */
}

/* Get the initial stack frame pointer from the thread's stack buffer. */
static struct init_stack_frame *get_iframe(struct k_thread *thread,
					   char *stack_ptr)
{
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
		/* Initial stack frame for a user thread is slightly larger;
		 * we land in z_user_thread_entry_wrapper on the privilege
		 * stack, and pop off an additional value for the user
		 * stack pointer.
		 */
		struct user_init_stack_frame *uframe;

		uframe = Z_STACK_PTR_TO_FRAME(struct user_init_stack_frame,
					      thread->arch.priv_stack_start +
					      CONFIG_PRIVILEGED_STACK_SIZE);
		uframe->user_sp = (uint32_t)stack_ptr;
		return &uframe->iframe;
	}
#endif
	return Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
}

/*
 * Pre-populate values in the registers inside the _callee_saved_stack struct
 * so that these registers have pre-defined values when the new thread begins
 * execution. For example, the thread pointer for thread local storage is set
 * up here so the thread starts with it already in place.
 */
static inline void arch_setup_callee_saved_regs(struct k_thread *thread,
						uintptr_t stack_ptr)
{
	_callee_saved_stack_t *regs = UINT_TO_POINTER(stack_ptr);

	ARG_UNUSED(regs);

/* GCC uses a TLS pointer cached in a register; MWDT just calls _mwget_tls */
#if defined(CONFIG_THREAD_LOCAL_STORAGE) && !defined(__CCAC__)
#ifdef CONFIG_ISA_ARCV2
#if __ARC_TLS_REGNO__ <= 0
#error Compiler not configured for thread local storage
#endif
#define TLSREG _CONCAT(r, __ARC_TLS_REGNO__)
	/* __ARC_TLS_REGNO__ selects the thread pointer register for ARCv2 */
	regs->TLSREG = thread->tls;
#else
	/* R30 is used as the thread pointer for ARCv3 */
	regs->r30 = thread->tls;
#endif /* CONFIG_ISA_ARCV2 */
#endif
}

/*
 * The initial context is a basic stack frame that contains the arguments for
 * z_thread_entry(), a return address that points at z_thread_entry(), and the
 * status register.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct init_stack_frame *iframe;

	setup_stack_vars(thread);

	/* Set up initial stack frame */
	iframe = get_iframe(thread, stack_ptr);

#ifdef CONFIG_USERSPACE
	/* Enable the US bit; US reads as zero in user mode. This allows user
	 * mode sleep instructions, and it enables a form of denial-of-service
	 * attack by putting the processor into sleep mode, but since the
	 * interrupt level/mask can't be set from user space that's no worse
	 * than executing a loop without yielding.
	 */
	iframe->status32 = _ARC_V2_STATUS32_US | _ARC_V2_STATUS32_DZ;
	if (is_user(thread)) {
		iframe->pc = (uint32_t)z_user_thread_entry_wrapper;
	} else {
		iframe->pc = (uint32_t)z_thread_entry_wrapper;
	}
#else
	iframe->status32 = _ARC_V2_STATUS32_DZ;
	iframe->pc = ((uintptr_t)z_thread_entry_wrapper);
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	iframe->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
#endif
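	/* z_thread_entry() arguments: the thread's entry point and its three parameters */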
	iframe->r0 = (uintptr_t)entry;
	iframe->r1 = (uintptr_t)p1;
	iframe->r2 = (uintptr_t)p2;
	iframe->r3 = (uintptr_t)p3;

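	/* Enable hardware stack checking in the new thread's context */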
#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	iframe->sec_stat |= _ARC_V2_SEC_STAT_SSC;
#else
	iframe->status32 |= _ARC_V2_STATUS32_SC;
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#endif /* CONFIG_ARC_STACK_CHECKING */
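	/* Allow this thread to perform unaligned memory accesses (STATUS32.AD) */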
#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
	iframe->status32 |= _ARC_V2_STATUS32_AD;
#endif
	/* Set required thread members */
	thread->switch_handle = thread;
	thread->arch.relinquish_cause = _CAUSE_COOP;
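	/*
	 * Reserve room below the initial frame for the callee-saved context
	 * restored when this thread is switched to for the first time.
	 */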
	thread->callee_saved.sp =
		(uintptr_t)iframe - ___callee_saved_stack_t_SIZEOF;

	arch_setup_callee_saved_regs(thread, thread->callee_saved.sp);

	/* initial values in all other regs/k_thread entries are irrelevant */
}

#ifdef CONFIG_MULTITHREADING
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	*old_thread = _current;

	return z_get_next_switch_handle(NULL);
}
#else
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	ARG_UNUSED(old_thread);

	return NULL;
}
#endif

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	setup_stack_vars(_current);

	/* possible optimization: no need to load mem domain anymore */
	/* need to lock cpu here ? */
	configure_mpu_thread(_current);

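	/* Drop to user mode and jump into user_entry; this call does not return */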
	z_arc_userspace_enter(user_entry, p1, p2, p3,
			      (uint32_t)_current->stack_info.start,
			      (_current->stack_info.size -
			       _current->stack_info.delta), _current);
	CODE_UNREACHABLE;
}
#endif

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	unsigned int key;

	/* Ensure a preemptive context switch does not occur */
	key = irq_lock();

	/* Disable all floating point capabilities for the thread */
	thread->base.user_options &= ~K_FP_REGS;

	irq_unlock(key);

	return 0;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	unsigned int key;

	/* Ensure a preemptive context switch does not occur */
	key = irq_lock();

	/* Enable all floating point capabilities for the thread */
	thread->base.user_options |= K_FP_REGS;

	irq_unlock(key);

	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if !defined(CONFIG_MULTITHREADING)

K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

extern void z_main_no_multithreading_entry_wrapper(void *p1, void *p2, void *p3,
						   void *main_stack, void *main_entry);

FUNC_NORETURN void z_arc_switch_to_main_no_multithreading(k_thread_entry_t main_entry,
							  void *p1, void *p2, void *p3)
{
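	/* Without CONFIG_MULTITHREADING no thread objects exist, so set up the
	 * CPU0 interrupt stack and the stack main() will run on by hand.
	 */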
	_kernel.cpus[0].id = 0;
	_kernel.cpus[0].irq_stack = (Z_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));

	void *main_stack = (Z_THREAD_STACK_BUFFER(z_main_stack) +
			    K_THREAD_STACK_SIZEOF(z_main_stack));

	arch_irq_unlock(_ARC_V2_INIT_IRQ_LOCK_KEY);

	z_main_no_multithreading_entry_wrapper(p1, p2, p3, main_stack, main_entry);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */

#if defined(CONFIG_ARC_DSP) && defined(CONFIG_ARC_DSP_SHARING)
void arc_dsp_disable(struct k_thread *thread, unsigned int options)
{
	/* Ensure a preemptive context switch does not occur */
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Disable DSP or AGU capabilities for the thread */
	thread->base.user_options &= ~(uint8_t)options;

	k_spin_unlock(&lock, key);
}

void arc_dsp_enable(struct k_thread *thread, unsigned int options)
{
	/* Ensure a preemptive context switch does not occur */
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Enable DSP or AGU capabilities for the thread */
	thread->base.user_options |= (uint8_t)options;

	k_spin_unlock(&lock, key);
}
#endif /* CONFIG_ARC_DSP && CONFIG_ARC_DSP_SHARING */