/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef _ASMANDES_UACCESS_H
#define _ASMANDES_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/types.h>

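/*
 * Build-time register check: expands to an assembler .ifnc/.err pair, so
 * assembly fails if the two register name strings differ.
 */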
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
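
/*
 * Illustrative sketch (not part of this header): a fault handler can
 * resolve a faulting instruction against the table with a lookup in the
 * style of search_exception_tables(), roughly:
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(instruction_pointer(regs));
 *	if (e) {
 *		instruction_pointer(regs) = e->fixup;	// resume at fixup
 *		return 1;				// fault handled
 *	}
 *	return 0;					// genuine fault
 */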

#define KERNEL_DS	((mm_segment_t) { ~0UL })
#define USER_DS		((mm_segment_t) { TASK_SIZE - 1 })

#define get_fs()	(current_thread_info()->addr_limit)
#define user_addr_max	get_fs

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)
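
/*
 * Historical usage sketch (assumes a caller that must feed a kernel
 * buffer through routines that expect user pointers); the old limit
 * must always be restored:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	...uaccess routines may now operate on kernel addresses...
 *	set_fs(old_fs);
 */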

#define __range_ok(addr, size)	(size <= get_fs() && addr <= (get_fs() - size))

#define access_ok(addr, size)	\
	__range_ok((unsigned long)addr, (unsigned long)size)
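
/*
 * Note: the range test is written as "addr <= limit - size" rather than
 * "addr + size <= limit" so that an attacker-supplied size cannot wrap
 * addr + size around past the limit.
 */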
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., they don't return a value as such).
 */
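
/*
 * Usage sketch (hypothetical caller): get_user() and put_user() return
 * 0 on success and -EFAULT on a faulting access, and get_user() also
 * zeroes its destination on failure.  Note that in this header get_user
 * expands to __get_user, which still performs the access_ok() check,
 * while put_user expands to the unchecked __put_user:
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (u32 __user *)uptr))
 *		return -EFAULT;
 */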

#define get_user	__get_user

#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_check(x, ptr, err)					\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbi", __gu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__get_user_asm("lhi", __gu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__get_user_asm("lwi", __gu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__get_user_asm_dword(__gu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	move	%1, #0\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err), "=&r" (x)					\
		: "r" (addr), "i" (-EFAULT)				\
		: "cc")

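/*
 * A 64-bit access uses a register pair; the %H/%L operand modifiers pick
 * the high and low halves of operand 1.  On big-endian NDS32 the high
 * word sits at the lower address, so it is transferred first; on
 * little-endian the low word is.
 */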
#ifdef __NDS32_EB__
#define __gu_reg_oper0 "%H1"
#define __gu_reg_oper1 "%L1"
#else
#define __gu_reg_oper0 "%L1"
#define __gu_reg_oper1 "%H1"
#endif

#define __get_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"			\
		"\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err), "=&r"(x)					\
		: "r"(addr), "i"(-EFAULT)				\
		: "cc")

#define put_user	__put_user

#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__put_user_err((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
})

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("sbi", __pu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__put_user_asm("shi", __pu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__put_user_asm("swi", __pu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__put_user_asm_dword(__pu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
} while (0)

#define __put_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT)			\
		: "cc")

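/*
 * Same register-pair selection as __gu_reg_oper*, but for the 64-bit
 * store: here the value is operand 2, so the halves are %H2/%L2.
 */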
#ifdef __NDS32_EB__
#define __pu_reg_oper0 "%H2"
#define __pu_reg_oper1 "%L2"
#else
#define __pu_reg_oper0 "%L2"
#define __pu_reg_oper1 "%H2"
#endif

#define __put_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tswi " __pu_reg_oper0 ",[%1]\n"			\
		"\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err)						\
		: "r"(addr), "r"(x), "i"(-EFAULT)			\
		: "cc")

extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
extern unsigned long __arch_copy_from_user(void *to, const void __user *from,
					   unsigned long n);
extern unsigned long __arch_copy_to_user(void __user *to, const void *from,
					 unsigned long n);

#define raw_copy_from_user __arch_copy_from_user
#define raw_copy_to_user __arch_copy_to_user

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(to, n);
	return n;
}

static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
	return __arch_clear_user(to, n);
}
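
/*
 * Usage sketch (hypothetical caller): clear_user() validates the range
 * itself and returns the number of bytes that could NOT be cleared, so
 * zero means success:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */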

#endif /* _ASMANDES_UACCESS_H */