1/*
2 * Copyright (c) 2022, Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#include <xtensa_asm2_s.h>
8#include <zephyr/offsets.h>
9#include <offsets_short.h>
10#include <zephyr/syscall.h>
11#include <zephyr/zsr.h>
12
13#include <xtensa/config/core-isa.h>
14
15/**
16 *  syscall number     arg1, arg2, arg3, arg4, arg5, arg6
17 *  --------------     ----------------------------------
18 *  a2                 a6,   a3,   a4,   a5,   a8,   a9
19 *
20 **/
/*
 * Entry point for the SYSCALL exception taken from user mode.
 *
 * Register usage on entry (see the table above):
 *   a2 = syscall ID; a6, a3, a4, a5, a8, a9 = arguments 1..6.
 * The exception vector has already stashed the interrupted a0 in
 * ZSR_A0SAVE (it is read back below when building the frame).
 */
.pushsection .text.xtensa_do_syscall, "ax"
.global	xtensa_do_syscall
.align	4
xtensa_do_syscall:
#if XCHAL_HAVE_THREADPTR == 0
	/* Without THREADPTR, xtensa_is_user_context() is implemented via a
	 * syscall that is answered entirely in this fast path.  Stash the
	 * syscall ID so a2 can be used as scratch.
	 */
	wsr a2, ZSR_SYSCALL_SCRATCH
	rsync

	/* If the faulting PC is the known syscall site inside
	 * xtensa_is_user_context() (xtensa_is_user_context_epc), answer
	 * the "am I in user mode?" question without a full frame.
	 */
	movi a0, xtensa_is_user_context_epc
	rsr.epc1 a2
	bne a0, a2, _not_checking_user_context

	/* Step EPC1 past the 3-byte SYSCALL instruction so execution
	 * resumes at the following instruction.
	 */
	addi a2, a2, 3
	wsr.epc1 a2

	/* Extract the RING bits from PS: non-zero means user mode. */
	movi a0, PS_RING_MASK
	rsr.ps a2
	and a2, a2, a0

	/* Need to set return to 1 if RING != 0,
	 * so we won't be leaking which ring we are in
	 * right now.
	 */
	beqz a2, _is_user_context_return

	movi a2, 1

_is_user_context_return:
	/* Restore the original a0 stashed by the exception vector. */
	rsr a0, ZSR_A0SAVE

	rfe

_not_checking_user_context:
	/* Slow path: recover the syscall ID into a2. */
	rsr a2, ZSR_SYSCALL_SCRATCH
#endif
	/* Locate the current thread's privileged stack pointer:
	 * a0 = _current_cpu->current->psp.
	 */
	rsr a0, ZSR_CPU
	l32i a0, a0, ___cpu_t_current_OFFSET
	l32i a0, a0, _thread_offset_to_psp

	/* Carve a base save area (BSA) frame out of the privileged stack. */
	addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF

	/* Save the interrupted context: user SP (a1), a2, a3, the
	 * original a0 (from ZSR_A0SAVE), PS and the return PC (EPC1).
	 */
	s32i a1, a0, ___xtensa_irq_bsa_t_scratch_OFFSET
	s32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET
	s32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET
	rsr a2, ZSR_A0SAVE
	s32i a2, a0, ___xtensa_irq_bsa_t_a0_OFFSET
	rsr.ps a2
	/* Drop OWB from the saved PS so the stale window base is not
	 * restored on exit.
	 */
	movi a3, ~PS_OWB_MASK
	and a2, a2, a3
	s32i a2, a0, ___xtensa_irq_bsa_t_ps_OFFSET
	rsr.epc1 a2
	s32i a2, a0, ___xtensa_irq_bsa_t_pc_OFFSET

	/* Mask all maskable interrupts (up to NMI level when available)
	 * while the register windows are manipulated below.
	 */
#if XCHAL_HAVE_NMI
	movi a2, PS_WOE|PS_INTLEVEL(XCHAL_NMILEVEL)
#elif XCHAL_HAVE_INTERRUPTS
	movi a2, PS_WOE|PS_INTLEVEL(XCHAL_NUM_INTLEVELS)
#else
#error Xtensa core with no interrupt support is used
#endif
	/* Also clear EXCM and RING so we execute as a normal kernel-mode
	 * handler with window exceptions enabled.
	 */
	rsr.ps a3
	or a3, a3, a2
	movi a2, ~(PS_EXCM | PS_RING_MASK)
	and a3, a3, a2
	wsr.ps a3
	rsync
	/* Reload a2/a3 (clobbered above) before spilling windows. */
	l32i a2, a0, ___xtensa_irq_bsa_t_a2_OFFSET
	l32i a3, a0, ___xtensa_irq_bsa_t_a3_OFFSET
	SPILL_ALL_WINDOWS

	/* Recompute the BSA pointer after the spill and switch a1 onto
	 * the privileged stack.
	 */
	rsr a0, ZSR_CPU
	l32i a0, a0, ___cpu_t_current_OFFSET
	l32i a0, a0, _thread_offset_to_psp
	addi a0, a0, -___xtensa_irq_bsa_t_SIZEOF

	mov a1, a0

	l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
#if XCHAL_HAVE_LOOPS
	/* If the syscall instruction was the last instruction in the body of
	 * a zero-overhead loop, and the loop will execute again, decrement
	 * the loop count and resume execution at the head of the loop.
	 */
	rsr.lend a2
	addi a3, a3, 3	/* a3 = PC of the instruction after SYSCALL */
	bne a2, a3, end_loop
	rsr.lcount a2
	beqz a2, end_loop
	addi a2, a2, -1
	wsr.lcount a2
	rsr.lbeg a3	/* loop repeats: resume at the loop head */
end_loop:
#else
	/* EPC1 (and now a3) contains the address that invoked syscall.
	 * We need to increment it to execute the next instruction when
	 * we return. The instruction size is 3 bytes, so lets just add it.
	 */
	addi a3, a3, 3
#endif
	s32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
	/* Save SAR, loop registers, SCOMPARE1, threadptr, etc. into the
	 * BSA as configured for this core.
	 */
	ODD_REG_SAVE

#if defined(CONFIG_XTENSA_HIFI_SHARING)
	/* Save the HiFi audio-engine registers when sharing is enabled. */
	call0 _xtensa_hifi_save
#endif

	/* Spill the high registers; afterwards the word at (a1, 0)
	 * points back to the BSA (see the double load below).
	 */
	call0 xtensa_save_high_regs

	/* Range-check the syscall ID (the saved a2) against the table. */
	l32i a2, a1, 0
	l32i a2, a2, ___xtensa_irq_bsa_t_a2_OFFSET
	movi a0, K_SYSCALL_LIMIT
	bgeu a2, a0, _bad_syscall

_id_ok:
	/* Find the function handler for the given syscall id. */
	movi a3, _k_syscall_table
	slli a2, a2, 2	/* ID * sizeof(void *) */
	add a2, a2, a3
	l32i a2, a2, 0

#if XCHAL_HAVE_THREADPTR
	/* Clear up the threadptr because it is used
	 * to check if a thread is running on user mode. Since
	 * we are in an interruption we don't want the system
	 * thinking it is possibly running in user mode.
	 */
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* With TLS the flag is the per-thread variable is_user_mode. */
	movi a0, is_user_mode@tpoff
	rur.THREADPTR a3
	add a0, a3, a0

	movi a3, 0
	s32i a3, a0, 0
#else
	/* Without TLS, THREADPTR == 0 itself marks kernel mode. */
	movi a0, 0
	wur.THREADPTR a0
#endif
#endif /* XCHAL_HAVE_THREADPTR */

	/* Set syscall parameters by moving them into place before we do
	 * a call4 for the syscall function itself; after the window
	 * rotation the callee sees our a6..a11 as its a2..a7.
	 * arg1 = a6
	 * arg2 = a3 (clobbered above, so we need to reload it)
	 * arg3 = a4
	 * arg4 = a5
	 * arg5 = a8
	 * arg6 = a9
	 */
	mov a10, a8	/* arg5: read a8 before it is overwritten below */
	mov a11, a9	/* arg6: read a9 before it is overwritten below */
	mov a8, a4	/* arg3 */
	mov a9, a5	/* arg4 */

	/* Stack frame pointer is the 7th argument to z_mrsh_*()
	 * as ssf, and must be put on stack to be consumed.
	 */
	mov a3, a1
	addi a1, a1, -4
	s32i a3, a1, 0

	/* arg2: reload the saved a3 from the BSA into a7. */
	l32i a3, a1, 4
	l32i a7, a3, ___xtensa_irq_bsa_t_a3_OFFSET


	/* Since we are unmasking EXCM, we need to set RING bits to kernel
	 * mode, otherwise we won't be able to run the exception handler in C.
	 */
	movi a0, PS_WOE|PS_CALLINC(0)|PS_UM|PS_INTLEVEL(0)
	wsr.ps a0
	rsync

	/* Dispatch to the marshalling handler for this syscall ID. */
	callx4 a2

	/* Pop the ssf word pushed above so a1 is back at the actual
	 * stack frame; register restoration below depends on it.
	 */
	addi a1, a1, 4

	/* Copy the return value (callee's a2, our a6) into the saved-a2
	 * slot of the BSA, because live registers will be clobbered in
	 * xtensa_restore_high_regs.
	 */
	l32i a3, a1, 0
	s32i a6, a3, ___xtensa_irq_bsa_t_a2_OFFSET

	j _syscall_returned

_syscall_returned:
	/* Undo xtensa_save_high_regs; a1 ends up at the BSA. */
	call0 xtensa_restore_high_regs

	/* Restore the special registers saved by ODD_REG_SAVE. */
	l32i a3, a1, ___xtensa_irq_bsa_t_sar_OFFSET
	wsr a3, SAR
#if XCHAL_HAVE_LOOPS
	l32i a3, a1, ___xtensa_irq_bsa_t_lbeg_OFFSET
	wsr a3, LBEG
	l32i a3, a1, ___xtensa_irq_bsa_t_lend_OFFSET
	wsr a3, LEND
	l32i a3, a1, ___xtensa_irq_bsa_t_lcount_OFFSET
	wsr a3, LCOUNT
#endif
#if XCHAL_HAVE_S32C1I
	l32i a3, a1, ___xtensa_irq_bsa_t_scompare1_OFFSET
	wsr a3, SCOMPARE1
#endif

#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Returning to user mode: set is_user_mode back to 1. */
	l32i a3, a1, ___xtensa_irq_bsa_t_threadptr_OFFSET
	movi a0, is_user_mode@tpoff
	add a0, a3, a0
	movi a3, 1
	s32i a3, a0, 0
#else
	/* Mark user mode by pointing THREADPTR at the current thread. */
	rsr a3, ZSR_CPU
	l32i a3, a3, ___cpu_t_current_OFFSET
	wur.THREADPTR a3
#endif
#endif /* XCHAL_HAVE_THREADPTR */

	/* Restore the interrupted PS and return PC (consumed by rfe). */
	l32i a3, a1, ___xtensa_irq_bsa_t_ps_OFFSET
	wsr.ps a3

	l32i a3, a1, ___xtensa_irq_bsa_t_pc_OFFSET
	wsr.epc1 a3

	/* Restore the scratch registers and the interrupted stack pointer. */
	l32i a0, a1, ___xtensa_irq_bsa_t_a0_OFFSET
	l32i a2, a1, ___xtensa_irq_bsa_t_a2_OFFSET
	l32i a3, a1, ___xtensa_irq_bsa_t_a3_OFFSET

	l32i a1, a1, ___xtensa_irq_bsa_t_scratch_OFFSET
	rsync

	rfe

_bad_syscall:
	/* Out-of-range ID: route through the "bad syscall" table slot. */
	movi a2, K_SYSCALL_BAD
	j _id_ok

.popsection
261
/* FUNC_NORETURN void xtensa_userspace_enter(k_thread_entry_t user_entry,
 *					   void *p1, void *p2, void *p3,
 *					   uint32_t stack_end,
 *					   uint32_t stack_start)
 *
 * A one-way trip to userspace.
 *
 * After the register-window rotation performed by "entry" below, the
 * arguments land in a2 (user_entry), a3 (p1), a4 (p2), a5 (p3),
 * a6 (stack_end) and a7 (stack_start).
 */
.global xtensa_userspace_enter
.type xtensa_userspace_enter, @function
.align 4
xtensa_userspace_enter:
	/* Call entry to set a bit in the windowstart and
	 * do the rotation, but we are going to set our own
	 * stack.
	 */
	entry a1, 16

	SPILL_ALL_WINDOWS

	/* We have to switch to the kernel stack before spilling kernel
	 * data and erasing the user stack, to avoid leaking data from
	 * the previous context.
	 */
	mov a1, a7 /* stack start (low address) */

	/* a0 = _current_cpu->current (the current thread). */
	rsr a0, ZSR_CPU
	l32i a0, a0, ___cpu_t_current_OFFSET

	/* Stash the thread pointer and all arguments on the stack so
	 * they survive the calls below.  Slot layout:
	 *   [24]=thread  [20]=user_entry  [16]=p1  [12]=p2
	 *   [8]=p3  [4]=stack_end  [0]=stack_start
	 */
	addi a1, a1, -28
	s32i a0, a1, 24
	s32i a2, a1, 20
	s32i a3, a1, 16
	s32i a4, a1, 12
	s32i a5, a1, 8
	s32i a6, a1, 4
	s32i a7, a1, 0

	/* xtensa_user_stack_perms(thread): set up the thread's access
	 * to its user stack (call4 arg1 goes in a6).
	 */
	l32i a6, a1, 24
	call4 xtensa_user_stack_perms

	/* Switch memory protection to this thread's domain. */
	l32i a6, a1, 24
#ifdef CONFIG_XTENSA_MMU
	call4 xtensa_swap_update_page_tables
#endif
#ifdef CONFIG_XTENSA_MPU
	call4 xtensa_mpu_map_write
#endif

#if XCHAL_HAVE_THREADPTR
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Flag user mode via the thread's TLS variable is_user_mode. */
	rur.threadptr a3
	movi a0, is_user_mode@tpoff
	add a0, a3, a0
	movi a3, 1
	s32i a3, a0, 0
#else
	/* Mark user mode by pointing THREADPTR at the current thread. */
	rsr a3, ZSR_CPU
	l32i a3, a3, ___cpu_t_current_OFFSET
	wur.THREADPTR a3
#endif
#endif /* XCHAL_HAVE_THREADPTR */

	/* Set now z_thread_entry parameters, we are simulating a call4
	 * call, so parameters start at a6, a7, ...
	 */
	l32i a6, a1, 20	/* arg1 = user_entry */
	l32i a7, a1, 16	/* arg2 = p1 */
	l32i a8, a1, 12	/* arg3 = p2 */
	l32i a9, a1, 8	/* arg4 = p3 */

	/* Go back to the user stack (the saved stack_end). */
	l32i a1, a1, 4

	/* EPC2 holds the PC that "rfi 2" below will jump to. */
	movi a0, z_thread_entry
	wsr.epc2 a0

	/* Configuring PS register.
	 * We have to set callinc as well, since the called
	 * function will do "entry".  The non-zero RING value loaded
	 * into PS by rfi is what drops the CPU into user mode.
	 */
#ifdef CONFIG_XTENSA_MMU
	movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(2)
#endif
#ifdef CONFIG_XTENSA_MPU
	/* MPU only has RING 0 and 1. */
	movi a0, PS_WOE|PS_CALLINC(1)|PS_UM|PS_RING(1)
#endif

	wsr a0, EPS2

	/* Wipe out a0 (there is no return from this function). */
	movi a0, 0

	rfi 2