/*
 * Copyright (c) 2017, 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <errno.h>
#include <string.h>

#include <zephyr/cache.h> /* sys_cache_data_flush_and_invd_range() */
#include <zephyr/kernel.h>
#include <kernel_internal.h>

#include <xtensa_asm2_context.h>
#include <xtensa_internal.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

#ifdef CONFIG_USERSPACE

#ifdef CONFIG_THREAD_LOCAL_STORAGE
/*
 * Per-thread (TLS) variable indicating whether execution is in user mode.
 */
Z_THREAD_LOCAL uint32_t is_user_mode;
#endif

#endif /* CONFIG_USERSPACE */

/**
 * Initializes a stack area such that it can be "restored" later and
 * begin running with the specified function and three arguments. The
 * entry function takes three arguments to match the signature of
 * Zephyr's k_thread_entry_t. The thread will start with EXCM clear and
 * INTLEVEL set to zero (i.e. as a user thread; nothing starts out
 * masked, so don't assume that it is!).
 */
static void *init_stack(struct k_thread *thread, int *stack_top,
			void (*entry)(void *, void *, void *),
			void *arg1, void *arg2, void *arg3)
{
	void *ret;
	_xtensa_irq_stack_frame_a11_t *frame;
#ifdef CONFIG_USERSPACE
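	/* No privileged stack pointer yet; arch_user_mode_enter() sets
	 * this up for user threads before they drop to user mode.
	 */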
	thread->arch.psp = NULL;
#endif

	/* Not-a-cpu ID; ensures that the first time this is run, the
	 * stack will be invalidated. That covers the edge case of
	 * restarting a thread on a stack that had previously been run
	 * on one CPU, but then initialized on this one, and
	 * potentially run THERE and not HERE.
	 */
	thread->arch.last_cpu = -1;

	/* We cheat and shave 16 bytes off: the top four words are the
	 * A0-A3 spill area for the caller of the entry function,
	 * which doesn't exist. It will never be touched, so we
	 * arrange to enter the function with a CALLINC of 1 and a
	 * stack pointer 16 bytes above the top, so its ENTRY at the
	 * start will decrement the stack pointer by 16.
	 */
	const int bsasz = sizeof(*frame) - 16;

	frame = (void *)(((char *) stack_top) - bsasz);

	(void)memset(frame, 0, bsasz);

#ifdef CONFIG_USERSPACE
	/* _restore_context uses this instead of frame->bsa.ps to
	 * restore PS value.
	 */
	thread->arch.return_ps = PS_WOE | PS_UM | PS_CALLINC(1);

	if ((thread->base.user_options & K_USER) == K_USER) {
		frame->bsa.pc = (uintptr_t)arch_user_mode_enter;
	} else {
		frame->bsa.pc = (uintptr_t)z_thread_entry;
	}
#else
	frame->bsa.ps = PS_WOE | PS_UM | PS_CALLINC(1);
	frame->bsa.pc = (uintptr_t)z_thread_entry;
#endif
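
	/* Seed THREADPTR when the core has it: with TLS it holds the
	 * thread's TLS pointer; otherwise, with userspace, it holds the
	 * thread pointer for user threads and NULL for kernel threads.
	 */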
#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	frame->bsa.threadptr = thread->tls;
#elif defined(CONFIG_USERSPACE)
	frame->bsa.threadptr = (uintptr_t)((thread->base.user_options & K_USER) ? thread : NULL);
#endif
#endif

	/* Arguments to z_thread_entry(). Remember these start at A6,
	 * which will be rotated into A2 by the ENTRY instruction that
	 * begins the C function. And A4-A7 and A8-A11 are optional
	 * quads that live below the BSA!
	 */
	frame->a7 = (uintptr_t)arg1;  /* callee's A3 */
	frame->a6 = (uintptr_t)entry; /* callee's A2 */
	frame->a5 = 0;
	frame->a4 = 0;

	frame->a11 = 0;
	frame->a10 = 0;
	frame->a9 = (uintptr_t)arg3;  /* callee's A5 */
	frame->a8 = (uintptr_t)arg2;  /* callee's A4 */

	/* Finally push the BSA pointer and return the stack pointer
	 * as the handle
	 */
	frame->ptr_to_bsa = (void *)&frame->bsa;
	ret = &frame->ptr_to_bsa;

	return ret;
}

void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	thread->switch_handle = init_stack(thread, (int *)stack_ptr, entry,
					   p1, p2, p3);
#ifdef CONFIG_XTENSA_LAZY_HIFI_SHARING
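	/* Start the thread with a zeroed HiFi save area; with lazy HiFi
	 * sharing the registers are only saved/restored on demand.
	 */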
	memset(thread->arch.hifi_regs, 0, sizeof(thread->arch.hifi_regs));
#endif /* CONFIG_XTENSA_LAZY_HIFI_SHARING */

#ifdef CONFIG_KERNEL_COHERENCE
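	/* Write the freshly initialized frame back to RAM and drop the
	 * cache lines so another CPU switching to this thread sees it.
	 */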
	__ASSERT((((size_t)stack) % XCHAL_DCACHE_LINESIZE) == 0, "");
	__ASSERT((((size_t)stack_ptr) % XCHAL_DCACHE_LINESIZE) == 0, "");
	sys_cache_data_flush_and_invd_range(stack, (char *)stack_ptr - (char *)stack);
#endif
}

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
int arch_float_disable(struct k_thread *thread)
{
	ARG_UNUSED(thread);
	/* The Xtensa FPU is always enabled, so it cannot be disabled. */
	return -ENOTSUP;
}

int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	ARG_UNUSED(thread);
	ARG_UNUSED(options);
	/* The Xtensa FPU is always enabled, so there is nothing to do here. */
	return 0;
}
#endif /* CONFIG_FPU && CONFIG_FPU_SHARING */

#if defined(CONFIG_XTENSA_LAZY_HIFI_SHARING)
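/* If the thread still owns the lazily-shared HiFi state on the CPU it
 * last ran on, release that ownership so stale state is not associated
 * with a thread that is going away.
 */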
void xtensa_hifi_disown(struct k_thread *thread)
{
	unsigned int cpu_id = 0;
	struct k_thread *owner;

#if CONFIG_MP_MAX_NUM_CPUS > 1
	cpu_id = thread->base.cpu;
#endif

	owner = atomic_ptr_get(&_kernel.cpus[cpu_id].arch.hifi_owner);

	if (owner == thread) {
		atomic_ptr_set(&_kernel.cpus[cpu_id].arch.hifi_owner, NULL);
	}
}
#endif /* CONFIG_XTENSA_LAZY_HIFI_SHARING */

int arch_coprocessors_disable(struct k_thread *thread)
{
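	/* Return -ENOTSUP unless at least one coprocessor-sharing option
	 * below is enabled for this build.
	 */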
	bool enotsup = true;

#if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
	arch_float_disable(thread);
	enotsup = false;
#endif

#if defined(CONFIG_XTENSA_LAZY_HIFI_SHARING)
	xtensa_hifi_disown(thread);

	/*
	 * This routine is only called when aborting a thread, and we
	 * deliberately do not disable the HiFi coprocessor here:
	 * 1. Such disabling can only be done for the current CPU, and we do
	 *    not have control over which CPU the thread is running on.
	 * 2. If the thread being aborted is currently executing, there will
	 *    be a context switch to another thread, and that CPU will
	 *    automatically disable the HiFi coprocessor upon the switch.
	 */
	enotsup = false;
#endif

	return enotsup ? -ENOTSUP : 0;
}

#ifdef CONFIG_USERSPACE
FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3)
{
	struct k_thread *current = _current;
	size_t stack_end;

	struct xtensa_thread_stack_header *header =
		(struct xtensa_thread_stack_header *)current->stack_obj;
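
	/* Point the thread's privileged stack pointer at the top (highest
	 * address) of its per-thread privilege stack; this is the stack
	 * used when the user thread traps back into the kernel.
	 */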
	current->arch.psp = header->privilege_stack +
		sizeof(header->privilege_stack);

#ifdef CONFIG_INIT_STACKS
	/* setup_thread_stack() does not initialize the architecture-specific
	 * privileged stack, so we have to do it manually here: this function
	 * runs after z_setup_new_thread() has called setup_thread_stack(),
	 * but before the thread's entry point starts running.
	 *
	 * Note that only user threads have privileged stacks; kernel-only
	 * threads do not.
	 */
	(void)memset(&header->privilege_stack[0], 0xaa, sizeof(header->privilege_stack));
#endif

#ifdef CONFIG_KERNEL_COHERENCE
	sys_cache_data_flush_and_invd_range(&header->privilege_stack[0],
					    sizeof(header->privilege_stack));
#endif

	/* Transition will reset stack pointer to initial, discarding
	 * any old context since this is a one-way operation
	 */
	stack_end = Z_STACK_PTR_ALIGN(current->stack_info.start +
				      current->stack_info.size -
				      current->stack_info.delta);

	xtensa_userspace_enter(user_entry, p1, p2, p3,
			       stack_end, current->stack_info.start);

	CODE_UNREACHABLE;
}

int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
				     size_t *unused_ptr)
{
	if (!IS_ENABLED(CONFIG_INIT_STACKS) || !IS_ENABLED(CONFIG_THREAD_STACK_INFO)) {
		/*
		 * This is needed to ensure that the call to z_stack_space_get() below is properly
		 * dead-stripped when linking using LLVM / lld. For more info, please see issue
		 * #98491.
		 */
		return -EINVAL;
	}

	struct xtensa_thread_stack_header *hdr_stack_obj;

	if ((thread->base.user_options & K_USER) != K_USER) {
		return -EINVAL;
	}

	hdr_stack_obj = (struct xtensa_thread_stack_header *)thread->stack_obj;

	return z_stack_space_get(&hdr_stack_obj->privilege_stack[0],
				 sizeof(hdr_stack_obj->privilege_stack),
				 unused_ptr);
}
#endif /* CONFIG_USERSPACE */