/*
 * Copyright (c) 2018 Linaro Limited.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief x86 (IA32) specific syscall header
 *
 * This header contains the x86 specific syscall interface.  It is
 * included by the syscall interface architecture-abstraction header
 * (include/arch/syscall.h)
 */

#ifndef ZEPHYR_INCLUDE_ARCH_X86_IA32_SYSCALL_H_
#define ZEPHYR_INCLUDE_ARCH_X86_IA32_SYSCALL_H_

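/* Hardcoded GDT segment selectors for user mode. The low two bits of each
 * selector encode a requested privilege level (RPL) of 3, matching the
 * DPL=3 descriptors noted below.
 */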
#define USER_CODE_SEG	0x2b /* at dpl=3 */
#define USER_DATA_SEG	0x33 /* at dpl=3 */

#ifdef CONFIG_USERSPACE
#ifndef _ASMLANGUAGE

#include <zephyr/types.h>
#include <stdbool.h>
#include <zephyr/linker/sections.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Syscall invocation functions. x86-specific machine constraints are used to
 * ensure arguments land in the proper registers; see the implementation of
 * z_x86_syscall_entry_stub in userspace.S.
 */
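/* Register convention implied by the constraints below: ESI carries the call
 * ID, EAX/EDX/ECX/EBX/EDI carry arguments 1-5, EBP is loaded with argument 6
 * in the six-argument case, and the return value comes back in EAX.
 */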

__pinned_func
static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id)
{
	uint32_t ret;

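	/* EBP serves as the frame pointer and cannot be claimed via a
	 * register constraint here, so arg6 is passed as a memory operand
	 * and moved into EBP by hand, saving and restoring the original
	 * value around the trap.
	 */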
	__asm__ volatile("push %%ebp\n\t"
			 "mov %[arg6], %%ebp\n\t"
			 "int $0x80\n\t"
			 "pop %%ebp\n\t"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2),
			   "c" (arg3), "b" (arg4), "D" (arg5),
			   [arg6] "m" (arg6)
			 : "memory");
	return ret;
}

__pinned_func
static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2),
			   "c" (arg3), "b" (arg4), "D" (arg5)
			 : "memory");
	return ret;
}

__pinned_func
static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3),
			   "b" (arg4)
			 : "memory");
	return ret;
}

__pinned_func
static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2), "c" (arg3)
			 : "memory");
	return ret;
}

__pinned_func
static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1), "d" (arg2)
			 : "memory"
			 );
	return ret;
}

__pinned_func
static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id), "a" (arg1)
			 : "memory"
			 );
	return ret;
}

__pinned_func
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id)
{
	uint32_t ret;

	__asm__ volatile("int $0x80"
			 : "=a" (ret)
			 : "S" (call_id)
			 : "memory"
			 );
	return ret;
}

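/* Determine whether the current context is running in user mode by checking
 * whether CS holds the user-mode code segment selector.
 */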
__pinned_func
static inline bool arch_is_user_context(void)
{
	int cs;

	/* On x86, read the CS register (which cannot be manually set) */
	__asm__ volatile ("mov %%cs, %[cs_val]" : [cs_val] "=r" (cs));

	return cs == USER_CODE_SEG;
}
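/* Illustrative use only (not part of this header): a syscall wrapper would
 * typically take the INT 0x80 path only when running in user mode, e.g.
 *
 *	if (arch_is_user_context()) {
 *		return (int)arch_syscall_invoke1(arg, K_SYSCALL_FOO);
 *	}
 *	return z_impl_foo(arg);
 *
 * where K_SYSCALL_FOO and z_impl_foo() are hypothetical names standing in
 * for a generated call ID and the kernel-side implementation.
 */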


#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */
#endif /* CONFIG_USERSPACE */
#endif /* ZEPHYR_INCLUDE_ARCH_X86_IA32_SYSCALL_H_ */