1 /*
2 * Copyright (c) 2022 Intel Corporation.
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @file
9 * @brief Xtensa specific syscall header
10 *
11 * This header contains the Xtensa specific syscall interface. It is
12 * included by the syscall interface architecture-abstraction header
13 * (include/arch/syscall.h)
14 */
15
16 #ifndef ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
17 #define ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_
18
19 #ifdef CONFIG_USERSPACE
20 #ifndef _ASMLANGUAGE
21
22 #include <zephyr/types.h>
23 #include <stdbool.h>
24 #include <zephyr/linker/sections.h>
25 #include <zephyr/sys/util_macro.h>
26
27 #include <xtensa/config/core-isa.h>
28
29 #ifdef __cplusplus
30 extern "C" {
31 #endif
32
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
/*
 * Out-of-line assembly helpers used instead of the inline-asm paths in the
 * arch_syscall_invoke{4,5,6}() functions below.  Each takes the syscall
 * arguments followed by the call ID, and returns the syscall's return value.
 */
uintptr_t xtensa_syscall_helper_args_6(uintptr_t arg1, uintptr_t arg2,
				       uintptr_t arg3, uintptr_t arg4,
				       uintptr_t arg5, uintptr_t arg6,
				       uintptr_t call_id);

uintptr_t xtensa_syscall_helper_args_5(uintptr_t arg1, uintptr_t arg2,
				       uintptr_t arg3, uintptr_t arg4,
				       uintptr_t arg5, uintptr_t call_id);

uintptr_t xtensa_syscall_helper_args_4(uintptr_t arg1, uintptr_t arg2,
				       uintptr_t arg3, uintptr_t arg4,
				       uintptr_t call_id);

/* Inlining qualifier for the arch_syscall_invoke*() definitions below:
 * forced-inline when they merely forward to the helpers above, plain
 * "inline" otherwise.
 */
#define SYSINL ALWAYS_INLINE
#else
#define SYSINL inline
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
51
/**
 * We are following Linux Xtensa syscall ABI:
 *
 * syscall number  arg1, arg2, arg3, arg4, arg5, arg6
 * --------------  ----------------------------------
 *   a2            a6,   a3,   a4,   a5,   a8,   a9
 */
60
61
/**
 * @brief Invoke a system call with six arguments.
 *
 * Per the ABI table above, the call ID goes in a2 and the arguments in
 * a6, a3, a4, a5, a8 and a9; the SYSCALL instruction then traps to the
 * kernel, and the handler's return value comes back in a2.
 *
 * With CONFIG_XTENSA_SYSCALL_USE_HELPER, register marshalling is done by
 * an out-of-line assembly helper instead of inline asm.
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument.
 * @param arg3 Third argument.
 * @param arg4 Fourth argument.
 * @param arg5 Fifth argument.
 * @param arg6 Sixth argument.
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static SYSINL uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper_args_6(arg1, arg2, arg3, arg4, arg5, arg6, call_id);
#else
	/* Pin each value to its ABI-mandated register.  Keep these
	 * initializations immediately before the asm: per GCC's local
	 * register variable rules, intervening code may clobber them.
	 */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;
	register uintptr_t a9 __asm__("%a9") = arg6;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8), "r" (a9)
			 : "memory");

	/* a2 now holds the handler's return value. */
	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
87
/**
 * @brief Invoke a system call with five arguments.
 *
 * Same scheme as arch_syscall_invoke6(): call ID in a2, arguments in
 * a6, a3, a4, a5 and a8, return value back in a2.  Delegates to an
 * out-of-line helper when CONFIG_XTENSA_SYSCALL_USE_HELPER is enabled.
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument.
 * @param arg3 Third argument.
 * @param arg4 Fourth argument.
 * @param arg5 Fifth argument.
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static SYSINL uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper_args_5(arg1, arg2, arg3, arg4, arg5, call_id);
#else
	/* Pin each value to its ABI-mandated register (see invoke6). */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;
	register uintptr_t a8 __asm__("%a8") = arg5;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5), "r" (a8)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
111
/**
 * @brief Invoke a system call with four arguments.
 *
 * Call ID in a2, arguments in a6, a3, a4 and a5, return value back in a2.
 * Delegates to an out-of-line helper when CONFIG_XTENSA_SYSCALL_USE_HELPER
 * is enabled.
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument.
 * @param arg3 Third argument.
 * @param arg4 Fourth argument.
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static SYSINL uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
#ifdef CONFIG_XTENSA_SYSCALL_USE_HELPER
	return xtensa_syscall_helper_args_4(arg1, arg2, arg3, arg4, call_id);
#else
	/* Pin each value to its ABI-mandated register (see invoke6). */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;
	register uintptr_t a5 __asm__("%a5") = arg4;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4),
			   "r" (a5)
			 : "memory");

	return a2;
#endif /* CONFIG_XTENSA_SYSCALL_USE_HELPER */
}
134
/**
 * @brief Invoke a system call with three arguments.
 *
 * Call ID in a2, arguments in a6, a3 and a4, return value back in a2.
 * No helper variant exists for three or fewer arguments, so this is
 * always plain inline.
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument.
 * @param arg3 Third argument.
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t call_id)
{
	/* Pin each value to its ABI-mandated register (see invoke6). */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;
	register uintptr_t a4 __asm__("%a4") = arg3;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3), "r" (a4)
			 : "memory");

	return a2;
}
150
/**
 * @brief Invoke a system call with two arguments.
 *
 * Call ID in a2, arguments in a6 and a3, return value back in a2.
 *
 * @param arg1 First argument to the system call.
 * @param arg2 Second argument.
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	/* Pin each value to its ABI-mandated register (see invoke6). */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;
	register uintptr_t a3 __asm__("%a3") = arg2;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6), "r" (a3)
			 : "memory");

	return a2;
}
165
/**
 * @brief Invoke a system call with one argument.
 *
 * Call ID in a2, argument in a6, return value back in a2.
 *
 * @param arg1 Argument to the system call.
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1, uintptr_t call_id)
{
	/* Pin each value to its ABI-mandated register (see invoke6). */
	register uintptr_t a2 __asm__("%a2") = call_id;
	register uintptr_t a6 __asm__("%a6") = arg1;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2), "r" (a6)
			 : "memory");

	return a2;
}
178
/**
 * @brief Invoke a system call with no arguments.
 *
 * Call ID in a2, return value back in a2.
 *
 * @param call_id System call ID.
 *
 * @return Value returned by the system call handler.
 */
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	/* Pin the call ID to a2, per the ABI (see invoke6). */
	register uintptr_t a2 __asm__("%a2") = call_id;

	__asm__ volatile("syscall\n\t"
			 : "=r" (a2)
			 : "r" (a2)
			 : "memory");

	return a2;
}
190
/*
 * There is no easy (or generic) way to figure out if a thread is running
 * in un-privileged mode. Reading the current ring (PS.CRING) is a privileged
 * instruction, and thread local storage is not available in xcc.
 */
/**
 * @brief Check whether the current execution context is in user mode.
 *
 * Since reading PS.CRING directly would itself be a privileged operation
 * (see the comment above), the user-readable THREADPTR register is used
 * as a proxy when the core has one; otherwise an out-of-line check,
 * implemented elsewhere, is used.
 *
 * @return true if running in user mode, false otherwise.
 */
static inline bool arch_is_user_context(void)
{
#if XCHAL_HAVE_THREADPTR
	uint32_t thread;

	/* RUR reads the THREADPTR user register without needing privilege. */
	__asm__ volatile(
		"rur.THREADPTR %0\n\t"
		: "=a" (thread)
	);
#ifdef CONFIG_THREAD_LOCAL_STORAGE
	/* Per-thread flag maintained by the kernel; declared in TLS. */
	extern Z_THREAD_LOCAL uint32_t is_user_mode;

	/* A NULL THREADPTR means the TLS area is not set up for this
	 * context, so is_user_mode cannot be read; treat as kernel mode.
	 */
	if (!thread) {
		return false;
	}

	return is_user_mode != 0;
#else
	/* Without TLS, a non-zero THREADPTR is taken to mean user mode. */
	return !!thread;
#endif

#else /* XCHAL_HAVE_THREADPTR */
	/* No THREADPTR on this core: defer to the out-of-line check
	 * defined elsewhere in the arch code.
	 */
	extern bool xtensa_is_user_context(void);

	return xtensa_is_user_context();
#endif /* XCHAL_HAVE_THREADPTR */
}
223
224 #undef SYSINL
225
226 #ifdef __cplusplus
227 }
228 #endif
229
230 #endif /* _ASMLANGUAGE */
231 #endif /* CONFIG_USERSPACE */
232 #endif /* ZEPHYR_INCLUDE_ARCH_XTENSA_SYSCALL_H_ */
233