/*
 * Copyright (c) 2014 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief New thread creation for ARCv2
 *
 * Core thread related primitives for the ARCv2 processor architecture.
 */

#include <zephyr/kernel.h>
#include <ksched.h>
#include <offsets_short.h>

#ifdef CONFIG_USERSPACE
#include <zephyr/arch/arc/v2/mpu/arc_core_mpu.h>
#endif

#if defined(CONFIG_ARC_VPX_COOPERATIVE_SHARING) || defined(CONFIG_DSP_SHARING)
#include <zephyr/arch/arc/v2/dsp/arc_dsp.h>
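/* Protects per-thread DSP/VPX option updates and the VPX lock bookkeeping below */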
static struct k_spinlock lock;
#endif

#if defined(CONFIG_ARC_VPX_COOPERATIVE_SHARING)
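/* One semaphore per CPU gates exclusive use of that CPU's VPX vector unit */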
static struct k_sem vpx_sem[CONFIG_MP_MAX_NUM_CPUS];
#endif

/* Initial stack frame */
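/*
 * Note: this frame is consumed when the thread first runs; its layout is
 * assumed to match what the ARC context switch and entry-wrapper assembly
 * expect.
 */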
struct init_stack_frame {
	uintptr_t pc;
#ifdef CONFIG_ARC_HAS_SECURE
	uint32_t sec_stat;
#endif
	uintptr_t status32;
	uintptr_t r3;
	uintptr_t r2;
	uintptr_t r1;
	uintptr_t r0;
};

#ifdef CONFIG_USERSPACE
struct user_init_stack_frame {
	struct init_stack_frame iframe;
	uint32_t user_sp;
};

static bool is_user(struct k_thread *thread)
{
	return (thread->base.user_options & K_USER) != 0;
}
#endif

/* Set all stack-related architecture variables for the provided thread */
static void setup_stack_vars(struct k_thread *thread)
{
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
#ifdef CONFIG_GEN_PRIV_STACKS
		thread->arch.priv_stack_start =
			(uint32_t)z_priv_stack_find(thread->stack_obj);
#else
		thread->arch.priv_stack_start = (uint32_t)(thread->stack_obj);
#endif /* CONFIG_GEN_PRIV_STACKS */
		thread->arch.priv_stack_start += Z_ARC_STACK_GUARD_SIZE;
	} else {
		thread->arch.priv_stack_start = 0;
	}
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
		thread->arch.k_stack_top = thread->arch.priv_stack_start;
		thread->arch.k_stack_base = (thread->arch.priv_stack_start +
					     CONFIG_PRIVILEGED_STACK_SIZE);
		thread->arch.u_stack_top = thread->stack_info.start;
		thread->arch.u_stack_base = (thread->stack_info.start +
					     thread->stack_info.size);
	} else
#endif /* CONFIG_USERSPACE */
	{
		thread->arch.k_stack_top = (uint32_t)thread->stack_info.start;
		thread->arch.k_stack_base = (uint32_t)(thread->stack_info.start +
						       thread->stack_info.size);
#ifdef CONFIG_USERSPACE
		thread->arch.u_stack_top = 0;
		thread->arch.u_stack_base = 0;
#endif /* CONFIG_USERSPACE */
	}
#endif /* CONFIG_ARC_STACK_CHECKING */
}

/* Get the initial stack frame pointer from the thread's stack buffer. */
static struct init_stack_frame *get_iframe(struct k_thread *thread,
					   char *stack_ptr)
{
#ifdef CONFIG_USERSPACE
	if (is_user(thread)) {
		/* Initial stack frame for a user thread is slightly larger;
		 * we land in z_user_thread_entry_wrapper on the privilege
		 * stack, and pop off an additional value for the user
		 * stack pointer.
		 */
		struct user_init_stack_frame *uframe;

		uframe = Z_STACK_PTR_TO_FRAME(struct user_init_stack_frame,
					      thread->arch.priv_stack_start +
					      CONFIG_PRIVILEGED_STACK_SIZE);
		uframe->user_sp = (uint32_t)stack_ptr;
		return &uframe->iframe;
	}
#endif
	return Z_STACK_PTR_TO_FRAME(struct init_stack_frame, stack_ptr);
}

/*
 * Pre-populate the registers inside the _callee_saved_stack struct so that
 * they have well-defined values when the new thread begins execution. For
 * example, the thread pointer for thread-local storage is set up here so
 * the thread starts with it already in place.
 */
static inline void arch_setup_callee_saved_regs(struct k_thread *thread,
						uintptr_t stack_ptr)
{
	_callee_saved_stack_t *regs = UINT_TO_POINTER(stack_ptr);

	ARG_UNUSED(regs);

/* GCC uses a TLS pointer cached in a register; MWDT instead calls _mwget_tls */
#if defined(CONFIG_THREAD_LOCAL_STORAGE) && !defined(__CCAC__)
#ifdef CONFIG_ISA_ARCV2
#if __ARC_TLS_REGNO__ <= 0
#error Compiler not configured for thread local storage
#endif
#define TLSREG _CONCAT(r, __ARC_TLS_REGNO__)
	/* r<__ARC_TLS_REGNO__> is used as the thread pointer on ARCv2 */
	regs->TLSREG = thread->tls;
#else
	/* R30 is used as the thread pointer on ARCv3 */
	regs->r30 = thread->tls;
#endif /* CONFIG_ISA_ARCV2 */
#endif
}

/*
 * The initial context is a basic stack frame that contains arguments for
 * z_thread_entry(), a return address that points at z_thread_entry(),
 * and the status register.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	struct init_stack_frame *iframe;

	setup_stack_vars(thread);

	/* Set up initial stack frame */
	iframe = get_iframe(thread, stack_ptr);

#ifdef CONFIG_USERSPACE
	/* Enable the US bit (read as zero in user mode). This allows user-mode
	 * sleep instructions, and it enables a form of denial-of-service attack
	 * by putting the processor into sleep mode, but since the interrupt
	 * level/mask can't be set from user space that's no worse than
	 * executing a loop without yielding.
	 */
	iframe->status32 = _ARC_V2_STATUS32_US | _ARC_V2_STATUS32_DZ;
	if (is_user(thread)) {
		iframe->pc = (uint32_t)z_user_thread_entry_wrapper;
	} else {
		iframe->pc = (uint32_t)z_thread_entry_wrapper;
	}
#else
	iframe->status32 = _ARC_V2_STATUS32_DZ;
	iframe->pc = ((uintptr_t)z_thread_entry_wrapper);
#endif /* CONFIG_USERSPACE */
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	iframe->sec_stat = z_arc_v2_aux_reg_read(_ARC_V2_SEC_STAT);
#endif
	iframe->r0 = (uintptr_t)entry;
	iframe->r1 = (uintptr_t)p1;
	iframe->r2 = (uintptr_t)p2;
	iframe->r3 = (uintptr_t)p3;

#ifdef CONFIG_ARC_STACK_CHECKING
#ifdef CONFIG_ARC_SECURE_FIRMWARE
	iframe->sec_stat |= _ARC_V2_SEC_STAT_SSC;
#else
	iframe->status32 |= _ARC_V2_STATUS32_SC;
#endif /* CONFIG_ARC_SECURE_FIRMWARE */
#endif /* CONFIG_ARC_STACK_CHECKING */
#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
	iframe->status32 |= _ARC_V2_STATUS32_AD;
#endif
	/* Set required thread members */
	thread->switch_handle = thread;
	thread->arch.relinquish_cause = _CAUSE_COOP;
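	/*
	 * Reserve room below the initial stack frame for the callee-saved
	 * register block; the context switch code restores registers from
	 * this area when the thread is first switched in.
	 */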
	thread->callee_saved.sp =
		(uintptr_t)iframe - ___callee_saved_stack_t_SIZEOF;

	arch_setup_callee_saved_regs(thread, thread->callee_saved.sp);

	/* initial values in all other regs/k_thread entries are irrelevant */
}

#ifdef CONFIG_MULTITHREADING
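/* Record the outgoing thread and return the next thread's switch handle. */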
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	*old_thread = arch_current_thread();

	return z_get_next_switch_handle(NULL);
}
#else
void *z_arch_get_next_switch_handle(struct k_thread **old_thread)
{
	ARG_UNUSED(old_thread);

	return NULL;
}
#endif

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	setup_stack_vars(arch_current_thread());

	/* possible optimization: no need to load mem domain anymore */
	/* need to lock the CPU here? */
	configure_mpu_thread(arch_current_thread());

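	/*
	 * Drop to user mode on the thread's user stack; this call does not
	 * return.
	 */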
	z_arc_userspace_enter(user_entry, p1, p2, p3,
			      (uint32_t)arch_current_thread()->stack_info.start,
			      (arch_current_thread()->stack_info.size -
			       arch_current_thread()->stack_info.delta), arch_current_thread());
	CODE_UNREACHABLE;
}
#endif

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	unsigned int key;

	/* Ensure a preemptive context switch does not occur */

	key = irq_lock();

	/* Disable all floating point capabilities for the thread */
	thread->base.user_options &= ~K_FP_REGS;

	irq_unlock(key);

	return 0;
}


int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	unsigned int key;

	/* Ensure a preemptive context switch does not occur */

	key = irq_lock();

	/* Enable all floating point capabilities for the thread */
	thread->base.user_options |= K_FP_REGS;

	irq_unlock(key);

	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if !defined(CONFIG_MULTITHREADING)

K_KERNEL_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS, CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

extern void z_main_no_multithreading_entry_wrapper(void *p1, void *p2, void *p3,
						   void *main_stack, void *main_entry);

FUNC_NORETURN void z_arc_switch_to_main_no_multithreading(k_thread_entry_t main_entry,
							  void *p1, void *p2, void *p3)
{
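	/* Point CPU 0 at its interrupt stack; there is no thread object in
	 * this configuration, so the main stack is handed to the entry
	 * wrapper directly.
	 */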
	_kernel.cpus[0].id = 0;
	_kernel.cpus[0].irq_stack = (K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]) +
				     K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]));

	void *main_stack = (K_THREAD_STACK_BUFFER(z_main_stack) +
			    K_THREAD_STACK_SIZEOF(z_main_stack));

	arch_irq_unlock(_ARC_V2_INIT_IRQ_LOCK_KEY);

	z_main_no_multithreading_entry_wrapper(p1, p2, p3, main_stack, main_entry);

	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
}
#endif /* !CONFIG_MULTITHREADING */

#if defined(CONFIG_ARC_DSP) && defined(CONFIG_DSP_SHARING)
void arc_dsp_disable(struct k_thread *thread, unsigned int options)
{
	/* Ensure a preemptive context switch does not occur */
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Disable DSP or AGU capabilities for the thread */
	thread->base.user_options &= ~(uint8_t)options;

	k_spin_unlock(&lock, key);
}

void arc_dsp_enable(struct k_thread *thread, unsigned int options)
{
	/* Ensure a preemptive context switch does not occur */
	k_spinlock_key_t key = k_spin_lock(&lock);

	/* Enable DSP or AGU capabilities for the thread */
	thread->base.user_options |= (uint8_t)options;

	k_spin_unlock(&lock, key);
}
#endif /* CONFIG_ARC_DSP && CONFIG_DSP_SHARING */

#if defined(CONFIG_ARC_VPX_COOPERATIVE_SHARING)
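/*
 * Acquire exclusive use of the current CPU's VPX unit. Callers are expected
 * to pair this with arc_vpx_unlock() once the vector work is done, e.g.:
 *
 *	if (arc_vpx_lock(K_FOREVER) == 0) {
 *		... use VPX registers ...
 *		arc_vpx_unlock();
 *	}
 */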
int arc_vpx_lock(k_timeout_t timeout)
{
	k_spinlock_key_t key;
	unsigned int id;

	key = k_spin_lock(&lock);

	id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
	__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
#endif
	k_spin_unlock(&lock, key);

	/*
	 * It is assumed that the thread is (still) pinned to
	 * the same CPU identified by <id>.
	 */

	return k_sem_take(&vpx_sem[id], timeout);
}

void arc_vpx_unlock(void)
{
	k_spinlock_key_t key;
	unsigned int id;

	key = k_spin_lock(&lock);
	id = _current_cpu->id;
#if (CONFIG_MP_MAX_NUM_CPUS > 1) && defined(CONFIG_SCHED_CPU_MASK)
	__ASSERT(!arch_is_in_isr() && (arch_current_thread()->base.cpu_mask == BIT(id)), "");
#endif
	k_spin_unlock(&lock, key);

	/*
	 * It is assumed that this thread is (still) pinned to
	 * the CPU identified by <id>, and that it is the same CPU
	 * used by arc_vpx_lock().
	 */

	k_sem_give(&vpx_sem[id]);
}

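/*
 * Unconditionally release the VPX semaphore for CPU <id>, regardless of
 * which thread currently holds it.
 */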
void arc_vpx_unlock_force(unsigned int id)
{
	__ASSERT(id < CONFIG_MP_MAX_NUM_CPUS, "");

	k_sem_give(&vpx_sem[id]);
}

static int arc_vpx_sem_init(void)
{
	for (unsigned int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
		k_sem_init(&vpx_sem[i], 1, 1);
	}

	return 0;
}

SYS_INIT(arc_vpx_sem_init, PRE_KERNEL_2, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
#endif