/*
 * Copyright (c) 2017, 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>

#include <zephyr/kernel.h>
#include <kernel_internal.h>

#include <xtensa_asm2_context.h>
#include <xtensa_internal.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_USERSPACE

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/*
 * Per-thread (TLS) variable indicating whether execution is in user mode.
 */
__thread uint32_t is_user_mode;
#endif

#endif /* CONFIG_USERSPACE */
/**
 * Initializes a stack area such that it can be "restored" later and
 * begin running with the specified function and three arguments.  The
 * entry function takes three arguments to match the signature of
 * Zephyr's k_thread_entry_t.  Thread will start with EXCM clear and
 * INTLEVEL set to zero (i.e. it's a user thread, we don't start with
 * anything masked, so don't assume that!).
 */
static void *init_stack(struct k_thread *thread, int *stack_top,
			void (*entry)(void *, void *, void *),
			void *arg1, void *arg2, void *arg3)
{
	void *ret;
	_xtensa_irq_stack_frame_a11_t *frame;
#ifdef CONFIG_USERSPACE
	/* The stack object's header carries a separate privileged
	 * stack; point psp one past its end (stacks grow downward).
	 */
	struct xtensa_thread_stack_header *header =
		(struct xtensa_thread_stack_header *)thread->stack_obj;

	thread->arch.psp = header->privilege_stack +
		sizeof(header->privilege_stack);
#endif

	/* Not-a-cpu ID Ensures that the first time this is run, the
	 * stack will be invalidated.  That covers the edge case of
	 * restarting a thread on a stack that had previously been run
	 * on one CPU, but then initialized on this one, and
	 * potentially run THERE and not HERE.
	 */
	thread->arch.last_cpu = -1;

	/* We cheat and shave 16 bytes off, the top four words are the
	 * A0-A3 spill area for the caller of the entry function,
	 * which doesn't exist.  It will never be touched, so we
	 * arrange to enter the function with a CALLINC of 1 and a
	 * stack pointer 16 bytes above the top, so its ENTRY at the
	 * start will decrement the stack pointer by 16.
	 */
	const int bsasz = sizeof(*frame) - 16;

	frame = (void *)(((char *) stack_top) - bsasz);

	/* Zero the whole frame so any register not set below starts
	 * with a known value.
	 */
	(void)memset(frame, 0, bsasz);

	/* Initial PS: WOE and UM set, with CALLINC of 1 to match the
	 * 16-byte shave above.
	 */
	frame->bsa.ps = PS_WOE | PS_UM | PS_CALLINC(1);
#ifdef CONFIG_USERSPACE
	/* User threads first land in arch_user_mode_enter() to drop
	 * privileges; kernel threads go straight to z_thread_entry().
	 */
	if ((thread->base.user_options & K_USER) == K_USER) {
		frame->bsa.pc = (uintptr_t)arch_user_mode_enter;
	} else {
		frame->bsa.pc = (uintptr_t)z_thread_entry;
	}
#else
	frame->bsa.pc = (uintptr_t)z_thread_entry;
#endif

#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* THREADPTR holds the TLS base for this thread. */
	frame->bsa.threadptr = thread->tls;
#elif CONFIG_USERSPACE
	/* Without TLS, THREADPTR carries the thread pointer for
	 * user-mode threads only; kernel threads leave it NULL.
	 */
	frame->bsa.threadptr = (uintptr_t)((thread->base.user_options & K_USER) ? thread : NULL);
#endif
#endif

	/* Arguments to z_thread_entry().  Remember these start at A6,
	 * which will be rotated into A2 by the ENTRY instruction that
	 * begins the C function.  And A4-A7 and A8-A11 are optional
	 * quads that live below the BSA!
	 */
	frame->a7 = (uintptr_t)arg1;  /* a7 */
	frame->a6 = (uintptr_t)entry; /* a6 */
	frame->a5 = 0;                /* a5 */
	frame->a4 = 0;                /* a4 */

	frame->a11 = 0;                /* a11 */
	frame->a10 = 0;                /* a10 */
	frame->a9  = (uintptr_t)arg3;  /* a9 */
	frame->a8  = (uintptr_t)arg2;  /* a8 */

	/* Finally push the BSA pointer and return the stack pointer
	 * as the handle
	 */
	frame->ptr_to_bsa = (void *)&frame->bsa;
	ret = &frame->ptr_to_bsa;

	return ret;
}
/**
 * @brief Architecture hook to set up a new thread for first execution.
 *
 * Builds the initial restore frame at the top of the thread's stack via
 * init_stack() and records the resulting handle so a context switch can
 * "restore" into the entry function.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	void *handle = init_stack(thread, (int *)stack_ptr, entry, p1, p2, p3);

	thread->switch_handle = handle;

#ifdef CONFIG_KERNEL_COHERENCE
	/* Both stack bounds must fall on cache-line boundaries so the
	 * flush-and-invalidate below covers exactly the stack region.
	 */
	__ASSERT((((size_t)stack) % XCHAL_DCACHE_LINESIZE) == 0, "");
	__ASSERT((((size_t)stack_ptr) % XCHAL_DCACHE_LINESIZE) == 0, "");
	sys_cache_data_flush_and_invd_range(stack, (char *)stack_ptr - (char *)stack);
#endif
}

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * @brief Disable FPU access for a thread.
 *
 * On Xtensa the FPU is always accessible, so per-thread disabling is
 * not supported.
 *
 * @param thread Thread in question (unused).
 * @return -ENOTSUP unconditionally.
 */
int arch_float_disable(struct k_thread *thread)
{
	(void)thread;

	return -ENOTSUP;
}
/**
 * @brief Enable FPU access for a thread.
 *
 * On Xtensa the FPU is always accessible, so this is trivially a
 * success with no work to perform.
 *
 * @param thread Thread in question (unused).
 * @param options Architecture-specific options (unused).
 * @return 0 unconditionally.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	(void)thread;
	(void)options;

	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#ifdef CONFIG_USERSPACE
arch_user_mode_enter(k_thread_entry_t user_entry,void * p1,void * p2,void * p3)143 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
144 					void *p1, void *p2, void *p3)
145 {
146 	struct k_thread *current = _current;
147 	size_t stack_end;
148 
149 	/* Transition will reset stack pointer to initial, discarding
150 	 * any old context since this is a one-way operation
151 	 */
152 	stack_end = Z_STACK_PTR_ALIGN(current->stack_info.start +
153 				      current->stack_info.size -
154 				      current->stack_info.delta);
155 
156 	xtensa_userspace_enter(user_entry, p1, p2, p3,
157 			       stack_end, current->stack_info.start);
158 
159 	CODE_UNREACHABLE;
160 }
#endif /* CONFIG_USERSPACE */