/*
 * Copyright (c) 2017, 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */
6
7 #include <errno.h>
8 #include <string.h>
9
10 #include <zephyr/kernel.h>
11 #include <kernel_internal.h>
12
13 #include <xtensa_asm2_context.h>
14 #include <xtensa_internal.h>
15
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
18
19 #ifdef CONFIG_USERSPACE
20
21 #ifdef CONFIG_THREAD_LOCAL_STORAGE
/*
 * Per-thread (TLS) variable indicating whether execution is in user mode.
 */
25 Z_THREAD_LOCAL uint32_t is_user_mode;
26 #endif
27
28 #endif /* CONFIG_USERSPACE */
29
/**
 * Initializes a stack area such that it can be "restored" later and
 * begin running with the specified function and three arguments.  The
 * entry function takes three arguments to match the signature of
 * Zephyr's k_thread_entry_t.  Thread will start with EXCM clear and
 * INTLEVEL set to zero (i.e. it's a user thread, we don't start with
 * anything masked, so don't assume that!).
 */
static void *init_stack(struct k_thread *thread, int *stack_top,
			void (*entry)(void *, void *, void *),
			void *arg1, void *arg2, void *arg3)
{
	void *ret;
	_xtensa_irq_stack_frame_a11_t *frame;
#ifdef CONFIG_USERSPACE
	struct xtensa_thread_stack_header *header =
		(struct xtensa_thread_stack_header *)thread->stack_obj;

	/* Initial privileged stack pointer: the privileged stack grows
	 * down from the end of its dedicated area inside the stack object.
	 */
	thread->arch.psp = header->privilege_stack +
		sizeof(header->privilege_stack);
#endif

	/* Not-a-cpu ID.  Ensures that the first time this is run, the
	 * stack will be invalidated.  That covers the edge case of
	 * restarting a thread on a stack that had previously been run
	 * on one CPU, but then initialized on this one, and
	 * potentially run THERE and not HERE.
	 */
	thread->arch.last_cpu = -1;

	/* We cheat and shave 16 bytes off, the top four words are the
	 * A0-A3 spill area for the caller of the entry function,
	 * which doesn't exist.  It will never be touched, so we
	 * arrange to enter the function with a CALLINC of 1 and a
	 * stack pointer 16 bytes above the top, so its ENTRY at the
	 * start will decrement the stack pointer by 16.
	 */
	const int bsasz = sizeof(*frame) - 16;

	/* Place the frame so its end (minus the fake spill area) lands
	 * exactly at the top of the stack.
	 */
	frame = (void *)(((char *) stack_top) - bsasz);

	(void)memset(frame, 0, bsasz);

	/* Initial PS: window overflow enabled (WOE), user vector mode
	 * (UM), CALLINC of 1 to match the 16-byte trick above; EXCM
	 * clear and INTLEVEL zero, per the function's contract.
	 */
	frame->bsa.ps = PS_WOE | PS_UM | PS_CALLINC(1);
#ifdef CONFIG_USERSPACE
	if ((thread->base.user_options & K_USER) == K_USER) {
#ifdef CONFIG_INIT_STACKS
		/* setup_thread_stack() does not initialize the architecture specific
		 * privileged stack.  So we need to do it manually here as this function
		 * is called by arch_new_thread() via z_setup_new_thread() after
		 * setup_thread_stack() but before thread starts running.
		 *
		 * Note that only user threads have privileged stacks and kernel
		 * only threads do not.
		 */
		(void)memset(&header->privilege_stack[0], 0xaa, sizeof(header->privilege_stack));
#endif

		/* User threads start in arch_user_mode_enter(), which
		 * performs the drop to user mode before the entry point.
		 */
		frame->bsa.pc = (uintptr_t)arch_user_mode_enter;
	} else {
		frame->bsa.pc = (uintptr_t)z_thread_entry;
	}
#else
	frame->bsa.pc = (uintptr_t)z_thread_entry;
#endif

#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	frame->bsa.threadptr = thread->tls;
#elif CONFIG_USERSPACE
	/* Without TLS, THREADPTR serves as a marker: current thread for
	 * user threads, NULL for kernel threads.
	 */
	frame->bsa.threadptr = (uintptr_t)((thread->base.user_options & K_USER) ? thread : NULL);
#endif
#endif

	/* Arguments to z_thread_entry().  Remember these start at A6,
	 * which will be rotated into A2 by the ENTRY instruction that
	 * begins the C function.  And A4-A7 and A8-A11 are optional
	 * quads that live below the BSA!
	 */
	frame->a7 = (uintptr_t)arg1;  /* a7 */
	frame->a6 = (uintptr_t)entry; /* a6 */
	frame->a5 = 0;                /* a5 */
	frame->a4 = 0;                /* a4 */

	frame->a11 = 0;              /* a11 */
	frame->a10 = 0;              /* a10 */
	frame->a9 = (uintptr_t)arg3; /* a9 */
	frame->a8 = (uintptr_t)arg2; /* a8 */

	/* Finally push the BSA pointer and return the stack pointer
	 * as the handle
	 */
	frame->ptr_to_bsa = (void *)&frame->bsa;
	ret = &frame->ptr_to_bsa;

	return ret;
}
127
/**
 * Architecture hook for creating a new thread.
 *
 * Builds the initial saved context on the new thread's stack; the
 * resulting handle is what the context-switch code later restores.
 */
void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
		     char *stack_ptr, k_thread_entry_t entry,
		     void *p1, void *p2, void *p3)
{
	void *handle;

	handle = init_stack(thread, (int *)stack_ptr, entry, p1, p2, p3);
	thread->switch_handle = handle;

#ifdef CONFIG_KERNEL_COHERENCE
	{
		size_t span = (size_t)(stack_ptr - (char *)stack);

		/* Stack bounds must be cache-line aligned so the flush
		 * and invalidate below cannot clobber adjacent data.
		 */
		__ASSERT(((size_t)stack % XCHAL_DCACHE_LINESIZE) == 0U, "");
		__ASSERT(((size_t)stack_ptr % XCHAL_DCACHE_LINESIZE) == 0U, "");
		sys_cache_data_flush_and_invd_range(stack, span);
	}
#endif
}
140
141 #if defined(CONFIG_FPU) && defined(CONFIG_FPU_SHARING)
/**
 * Disable FPU access for a thread.
 *
 * The Xtensa FPU is always enabled, so per-thread disabling is not
 * possible; the request is rejected unconditionally.
 *
 * @param thread Thread to disable FPU access for (unused).
 * @return -ENOTSUP always.
 */
int arch_float_disable(struct k_thread *thread)
{
	(void)thread;

	return -ENOTSUP;
}
147
/**
 * Enable FPU access for a thread.
 *
 * The Xtensa FPU is already permanently enabled, so this succeeds
 * without doing any work.
 *
 * @param thread Thread to enable FPU access for (unused).
 * @param options Architecture-specific options (unused).
 * @return 0 always.
 */
int arch_float_enable(struct k_thread *thread, unsigned int options)
{
	(void)thread;
	(void)options;

	return 0;
}
153 #endif /* CONFIG_FPU && CONFIG_FPU_SHARING */
154
155 #ifdef CONFIG_USERSPACE
arch_user_mode_enter(k_thread_entry_t user_entry,void * p1,void * p2,void * p3)156 FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
157 void *p1, void *p2, void *p3)
158 {
159 struct k_thread *current = arch_current_thread();
160 size_t stack_end;
161
162 /* Transition will reset stack pointer to initial, discarding
163 * any old context since this is a one-way operation
164 */
165 stack_end = Z_STACK_PTR_ALIGN(current->stack_info.start +
166 current->stack_info.size -
167 current->stack_info.delta);
168
169 xtensa_userspace_enter(user_entry, p1, p2, p3,
170 stack_end, current->stack_info.start);
171
172 CODE_UNREACHABLE;
173 }
174
arch_thread_priv_stack_space_get(const struct k_thread * thread,size_t * stack_size,size_t * unused_ptr)175 int arch_thread_priv_stack_space_get(const struct k_thread *thread, size_t *stack_size,
176 size_t *unused_ptr)
177 {
178 struct xtensa_thread_stack_header *hdr_stack_obj;
179
180 if ((thread->base.user_options & K_USER) != K_USER) {
181 return -EINVAL;
182 }
183
184 hdr_stack_obj = (struct xtensa_thread_stack_header *)thread->stack_obj;
185
186 return z_stack_space_get(&hdr_stack_obj->privilege_stack[0],
187 sizeof(hdr_stack_obj->privilege_stack),
188 unused_ptr);
189 }
190 #endif /* CONFIG_USERSPACE */
191