1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __M68K_UACCESS_H
3 #define __M68K_UACCESS_H
4 
5 /*
6  * User space memory access functions
7  */
8 #include <linux/compiler.h>
9 #include <linux/types.h>
10 #include <asm/segment.h>
11 
12 /* We let the MMU do all checking */
/*
 * On m68k the MMU performs all user-space access validation at fault
 * time (see comment above), so access_ok() can unconditionally succeed.
 * The @type and @size arguments are accepted only for interface
 * compatibility with other architectures.
 */
static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	return 1;
}
18 
19 /*
 * Not all variants of the 68k family support the notion of address spaces.
21  * The traditional 680x0 parts do, and they use the sfc/dfc registers and
22  * the "moves" instruction to access user space from kernel space. Other
23  * family members like ColdFire don't support this, and only have a single
24  * address space, and use the usual "move" instruction for user space access.
25  *
26  * Outside of this difference the user space access functions are the same.
27  * So lets keep the code simple and just define in what we need to use.
28  */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define	MOVES	"moves"	/* classic 680x0: access user space via sfc/dfc */
#else
#define	MOVES	"move"	/* single address space (e.g. ColdFire) */
#endif
34 
35 extern int __put_user_bad(void);
36 extern int __get_user_bad(void);
37 
/*
 * Store a single value of size 'bwl' (b/w/l) to user space.
 *
 * @res: error accumulator; set to @err if the store faults, untouched
 *	 on success
 * @x:   value to store
 * @ptr: user-space destination
 * @bwl: instruction size suffix (b, w or l)
 * @reg: constraint letter for @x ("d" for byte, "r" for word/long)
 * @err: value loaded into @res on fault (e.g. -EFAULT)
 *
 * A fault at label 1 (or reported at the following boundary, label 2)
 * is redirected by the __ex_table entries to the fixup at 10, which
 * loads @err into @res and resumes after the store.  @err must fit in
 * moveq's 8-bit immediate range.
 */
#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
asm volatile ("\n"					\
	"1:	"MOVES"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))
55 
56 /*
57  * These are the main single-value transfer routines.  They automatically
58  * use the right size if we just have the right pointer type.
59  */
60 
/*
 * Store @x to user space, sized by the pointee type of @ptr (1, 2, 4 or
 * 8 bytes).  Evaluates to 0 on success or -EFAULT on fault.  The 64-bit
 * case stores the two 32-bit halves through a post-incremented address
 * register; %R2 names the second register of the 64-bit pair.
 */
#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
		break;							\
	case 8:								\
	    {								\
		const void __user *__pu_ptr = (ptr);			\
		asm volatile ("\n"					\
			"1:	"MOVES".l	%2,(%1)+\n"		\
			"2:	"MOVES".l	%R2,(%1)\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	movel %3,%0\n"				\
			"	jra 3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align 4\n"				\
			"	.long 1b,10b\n"				\
			"	.long 2b,10b\n"				\
			"	.long 3b,10b\n"				\
			"	.previous"				\
			: "+d" (__pu_err), "+a" (__pu_ptr)		\
			: "r" (__pu_val), "i" (-EFAULT)			\
			: "memory");					\
		break;							\
	    }								\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
/* The MMU does all checking (see access_ok), so no separate check here. */
#define put_user(x, ptr)	__put_user(x, ptr)
107 
108 
/*
 * Load a single value of size 'bwl' (b/w/l) from user space.
 *
 * @res:  error accumulator; set to @err on fault
 * @x:    lvalue receiving the result (zeroed on fault via sub.l)
 * @ptr:  user-space source
 * @type: unsigned integer type matching the transfer size
 * @bwl:  instruction size suffix (b, w or l)
 * @reg:  register class for the temporary ("d" for byte, "r" otherwise)
 * @err:  value loaded into @res on fault (e.g. -EFAULT)
 *
 * The double __force cast launders the raw bits back into the pointee
 * type without sparse address-space warnings.
 */
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({		\
	type __gu_val;							\
	asm volatile ("\n"						\
		"1:	"MOVES"."#bwl"	%2,%1\n"			\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	move.l	%3,%0\n"				\
		"	sub.l	%1,%1\n"				\
		"	jra	2b\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10b\n"				\
		"	.previous"					\
		: "+d" (res), "=&" #reg (__gu_val)			\
		: "m" (*(ptr)), "i" (err));				\
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
})
129 
/*
 * Fetch a 1/2/4/8-byte value from user space into @x, sized by the
 * pointee type of @ptr.  Evaluates to 0 on success or -EFAULT on fault;
 * on fault @x is zeroed.  The 64-bit case reads both 32-bit halves via
 * a post-incremented address register (%R1 is the second register of
 * the pair); the union lets the raw u64 be assigned back as the
 * pointer's own type.
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
		break;							\
	case 8: {							\
		const void *__gu_ptr = (ptr);				\
		union {							\
			u64 l;						\
			__typeof__(*(ptr)) t;				\
		} __gu_val;						\
		asm volatile ("\n"					\
			"1:	"MOVES".l	(%2)+,%1\n"		\
			"2:	"MOVES".l	(%2),%R1\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	move.l	%3,%0\n"			\
			"	sub.l	%1,%1\n"			\
			"	sub.l	%R1,%R1\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.previous"				\
			: "+d" (__gu_err), "=&r" (__gu_val.l),		\
			  "+a" (__gu_ptr)				\
			: "i" (-EFAULT)					\
			: "memory");					\
		(x) = __gu_val.t;					\
		break;							\
	}								\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})
/* The MMU does all checking (see access_ok), so no separate check here. */
#define get_user(x, ptr) __get_user(x, ptr)
181 
182 unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
183 unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
184 
/*
 * Map a constant chunk size in bytes to the matching m68k instruction
 * size suffix.  Pasted via __suffix##n in __constant_copy_from_user_asm;
 * size 0 expands to nothing, which the .ifnc directives in the asm use
 * to compile a chunk out entirely.
 */
#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l
189 
/*
 * Copy up to three chunks of sizes @n1/@n2/@n3 (instruction suffixes
 * @s1/@s2/@s3) from user space @from to kernel @to.  Each chunk is read
 * with MOVES and written with a plain move.  Chunks with an empty
 * suffix are assembled out via .ifnc.
 *
 * A fault in chunk k jumps to fixup label 10/20/30; each fixup adds its
 * chunk size to @res and falls through to the fixups for the remaining
 * chunks, so @res accumulates the number of bytes NOT copied.
 */
#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	addq.l #"#n1",%0\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"20:	addq.l #"#n2",%0\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	addq.l #"#n3",%0\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

/*
 * Extra expansion layer so that the __suffix##n pastes below are fully
 * expanded to b/w/l (or nothing) before being stringized in the asm.
 */
#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
					__suffix##n1, __suffix##n2, __suffix##n3)
233 
/*
 * Inline copy-from-user for compile-time-constant sizes, expressed as
 * at most three chunks of 4/2/1 bytes.  Returns the number of bytes
 * that could not be copied (0 on success).  Sizes not expressible in
 * three moves fall back to __generic_copy_from_user().
 */
static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
		break;
	case 2:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
		break;
	case 4:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}
280 
/*
 * Copy two or three chunks (instruction suffixes @s1/@s2/@s3) from
 * kernel @from to user space @to; @n is the total byte count.  Reads
 * use plain move, stores use MOVES.  Each store label and the label
 * immediately after it have exception entries pointing at fixup 5,
 * which reports the entire transfer as failed (@res = @n) — unlike the
 * from-user path, no partial count is returned.  The third chunk is
 * assembled out via .ifnc when @s3 is empty.
 */
#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")
314 
/*
 * Inline copy-to-user for compile-time-constant sizes, expressed as at
 * most three moves.  Returns the number of bytes not copied (0 on
 * success).  Single-move sizes (1/2/4) reuse __put_user_asm with the
 * byte count as the "error" value, so a fault yields res = n.  Other
 * expressible sizes use __constant_copy_to_user_asm; the rest fall back
 * to __generic_copy_to_user().
 */
static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}
361 
362 static inline unsigned long
raw_copy_from_user(void * to,const void __user * from,unsigned long n)363 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
364 {
365 	if (__builtin_constant_p(n))
366 		return __constant_copy_from_user(to, from, n);
367 	return __generic_copy_from_user(to, from, n);
368 }
369 
370 static inline unsigned long
raw_copy_to_user(void __user * to,const void * from,unsigned long n)371 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
372 {
373 	if (__builtin_constant_p(n))
374 		return __constant_copy_to_user(to, from, n);
375 	return __generic_copy_to_user(to, from, n);
376 }
377 #define INLINE_COPY_FROM_USER
378 #define INLINE_COPY_TO_USER
379 
380 #define user_addr_max() \
381 	(uaccess_kernel() ? ~0UL : TASK_SIZE)
382 
383 extern long strncpy_from_user(char *dst, const char __user *src, long count);
384 extern __must_check long strnlen_user(const char __user *str, long n);
385 
386 unsigned long __clear_user(void __user *to, unsigned long n);
387 
388 #define clear_user	__clear_user
389 
#endif /* __M68K_UACCESS_H */
391