1 /*
2  * Copyright (c) 2022 Intel Corporation.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 /**
8  * @file
9  * @brief Xtensa specific syscall header
10  *
11  * This header contains the Xtensa specific syscall interface.  It is
12  * included by the syscall interface architecture-abstraction header
13  * (include/arch/syscall.h)
14  */
15 
16 #ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
17 #define ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
18 
19 #ifdef CONFIG_USERSPACE
20 #ifndef _ASMLANGUAGE
21 
22 #include <zephyr/types.h>
23 #include <stdbool.h>
24 #include <zephyr/linker/sections.h>
25 #include <zephyr/sys/util_macro.h>
26 
27 #include <xtensa/config/core-isa.h>
28 
29 #ifdef __cplusplus
30 extern "C" {
31 #endif
32 
33 #ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
34 uintptr_t xtensa_syscall_helper(uintptr_t arg1, uintptr_t arg2,
35 				uintptr_t arg3, uintptr_t arg4,
36 				uintptr_t arg5, uintptr_t arg6,
37 				uintptr_t call_id);
38 
39 #define SYSINL ALWAYS_INLINE
40 #else
41 #define SYSINL inline
42 #endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
43 
/**
 * We are following the Linux Xtensa syscall ABI:
 *
 *  syscall number     arg1, arg2, arg3, arg4, arg5, arg6
 *  --------------     ----------------------------------
 *  a2                 a6,   a3,   a4,   a5,   a8,   a9
 *
 */
52 
/**
 * @brief Invoke a syscall with six arguments.
 *
 * Marshals the arguments into the registers prescribed by the Linux
 * Xtensa syscall ABI (see table above) and traps into the kernel with
 * the SYSCALL instruction. When CONFIG_XTENSA_SYSCALL_USE_HELPER is
 * enabled, register setup is delegated to the out-of-line helper
 * instead of inline assembly.
 *
 * @param arg1 First syscall argument (in a6).
 * @param arg2 Second syscall argument (in a3).
 * @param arg3 Third syscall argument (in a4).
 * @param arg4 Fourth syscall argument (in a5).
 * @param arg5 Fifth syscall argument (in a8).
 * @param arg6 Sixth syscall argument (in a9).
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, arg4, arg5, arg6, call_id);
#else
	/* Pin each value to the exact register the syscall ABI expects. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;
	register uintptr_t a9 __asm__("%a9") = arg6;

	/* a2 is both the syscall number on entry and the return value on
	 * exit. The "memory" clobber keeps the compiler from caching
	 * memory accesses across the trap, since the kernel may read or
	 * write buffers referenced by the arguments.
	 */
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8), "r" (a9)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
78 
/**
 * @brief Invoke a syscall with five arguments.
 *
 * Same register marshaling as arch_syscall_invoke6(), with the unused
 * sixth argument slot passed as zero to the helper variant.
 *
 * @param arg1 First syscall argument (in a6).
 * @param arg2 Second syscall argument (in a3).
 * @param arg3 Third syscall argument (in a4).
 * @param arg4 Fourth syscall argument (in a5).
 * @param arg5 Fifth syscall argument (in a8).
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					uintptr_t arg3, uintptr_t arg4,
					uintptr_t arg5, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, arg4, arg5, 0, call_id);
#else
	/* Pin each value to the register mandated by the syscall ABI. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;

	/* a2 carries the syscall number in and the return value out. */
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
102 
/**
 * @brief Invoke a syscall with four arguments.
 *
 * Same register marshaling as arch_syscall_invoke6(); unused argument
 * slots are passed as zero to the helper variant.
 *
 * @param arg1 First syscall argument (in a6).
 * @param arg2 Second syscall argument (in a3).
 * @param arg3 Third syscall argument (in a4).
 * @param arg4 Fourth syscall argument (in a5).
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					uintptr_t arg3, uintptr_t arg4,
					uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, arg4, 0, 0, call_id);
#else
	/* Pin each value to the register mandated by the syscall ABI. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;

	/* a2 carries the syscall number in and the return value out. */
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
125 
/**
 * @brief Invoke a syscall with three arguments.
 *
 * Same register marshaling as arch_syscall_invoke6(); unused argument
 * slots are passed as zero to the helper variant.
 *
 * @param arg1 First syscall argument (in a6).
 * @param arg2 Second syscall argument (in a3).
 * @param arg3 Third syscall argument (in a4).
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					uintptr_t arg3, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, arg3, 0, 0, 0, call_id);
#else
	/* Pin each value to the register mandated by the syscall ABI. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;

	/* a2 carries the syscall number in and the return value out. */
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
145 
/**
 * @brief Invoke a syscall with two arguments.
 *
 * Same register marshaling as arch_syscall_invoke6(); unused argument
 * slots are passed as zero to the helper variant.
 *
 * @param arg1 First syscall argument (in a6).
 * @param arg2 Second syscall argument (in a3).
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, arg2, 0, 0, 0, 0, call_id);
#else
	/* Pin each value to the register mandated by the syscall ABI. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;

	/* a2 carries the syscall number in and the return value out. */
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
164 
/**
 * @brief Invoke a syscall with one argument.
 *
 * Same register marshaling as arch_syscall_invoke6(); unused argument
 * slots are passed as zero to the helper variant.
 *
 * @param arg1 First syscall argument (in a6).
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(arg1, 0, 0, 0, 0, 0, call_id);
#else
	/* Pin each value to the register mandated by the syscall ABI. */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;

	/* a2 carries the syscall number in and the return value out. */
	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
182 
/**
 * @brief Invoke a syscall with no arguments.
 *
 * Only the syscall number is passed (in a2, per the ABI table above);
 * all argument slots are zero in the helper variant.
 *
 * @param call_id Syscall number (in a2).
 *
 * @return Syscall return value (comes back in a2).
 */
static SYSINL uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper(0, 0, 0, 0, 0, 0, call_id);
#else
	/* a2 carries the syscall number in and the return value out. */
	register uintptr_t a2 __asm__("%a2") = call_id;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
198 
/*
 * There is no easy (or generic) way to figure out if a thread is running
 * in un-privileged mode. Reading the current ring (PS.CRING) is a
 * privileged instruction, and thread-local storage is not available
 * in xcc.
 */
/**
 * @brief Test whether the current execution context is user mode.
 *
 * When the core has the THREADPTR option, THREADPTR is read with the
 * unprivileged RUR instruction; a zero value is treated as kernel
 * context (presumably the kernel leaves THREADPTR clear for kernel
 * threads -- confirm against the arch code that sets it). With
 * thread-local storage enabled, the per-thread is_user_mode flag is
 * the authority, and a zero THREADPTR additionally means TLS is not
 * set up yet, so kernel context is reported. Without THREADPTR the
 * decision is delegated to xtensa_is_user_context(), defined elsewhere.
 *
 * @return true if executing in user mode, false otherwise.
 */
static inline bool arch_is_user_context(void)
{
#if XCHAL_HAVE_THREADPTR
	uint32_t thread;

	/* RUR reads the THREADPTR user register without privilege. */
	__asm__ volatile(
		"rur.THREADPTR %0\n\t"
		: "=a" (thread)
	);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	extern __thread uint32_t is_user_mode;

	/* No THREADPTR yet => TLS not initialized => not a user thread. */
	if (!thread) {
		return false;
	}

	return is_user_mode != 0;
#else
	return !!thread;
#endif

#else /* XCHAL_HAVE_THREADPTR */
	extern bool xtensa_is_user_context(void);

	return xtensa_is_user_context();
#endif /* XCHAL_HAVE_THREADPTR */
}
231 
232 #undef SYSINL
233 
234 #ifdef __cplusplus
235 }
236 #endif
237 
238 #endif /* _ASMLANGUAGE */
239 #endif /* CONFIG_USERSPACE */
240 #endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_ */
241