/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)
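
/*
 * Usage sketch (hypothetical caller, not part of this header): the classic
 * pattern for temporarily widening the address limit so a kernel buffer can
 * be passed to a routine that expects a __user pointer.  The old limit must
 * always be restored.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = some_read_routine(file, (char __user *)kbuf, len);	// hypothetical callee
 *	set_fs(old_fs);
 */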

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})
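
/*
 * Worked example (illustrative numbers, not taken from this file): on a
 * 32-bit build with limit = 0xbfffffff, addr = 0xffffff00, size = 0x200,
 * a naive "addr + size > limit" check would compute
 *
 *	addr + size = 0x100		(wrapped) -> 0x100 <= limit, bogus "ok"
 *
 * while the constant-size path above computes
 *
 *	limit - size = 0xbffffdff	-> addr > 0xbffffdff, correctly rejected
 *
 * The non-constant path catches the same wrap explicitly via "addr < size"
 * after the addition.
 */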

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
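
/*
 * Usage sketch (hypothetical ioctl-style handler, not part of this header):
 * validate the whole user range once, then use the __-prefixed accessors
 * that skip the per-access check.
 *
 *	struct foo_args __user *uarg = (void __user *)arg;	// foo_args/arg are hypothetical
 *	u32 val;
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(val, &uarg->val))	// may still fault and return -EFAULT
 *		return -EFAULT;
 */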

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin()	stac()
#define __uaccess_end()		clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
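
/*
 * Illustration (assumed build-time checks, not part of this header): on a
 * 64-bit kernel both branches are 8 bytes wide; on 32-bit, a u64 argument
 * selects the wider unsigned long long.
 *
 *	_Static_assert(sizeof(__inttype((char)0)) == sizeof(unsigned long),
 *		       "small types map to unsigned long");
 *	_Static_assert(sizeof(__inttype((u64)0)) >= sizeof(u64),
 *		       "u64 keeps its full width");
 */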

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
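
/*
 * Usage sketch (hypothetical syscall-style caller, not part of this header):
 * get_user() validates the pointer itself, so no prior access_ok() is needed.
 *
 *	int value;
 *
 *	if (get_user(value, (int __user *)arg))		// arg is hypothetical
 *		return -EFAULT;
 */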

#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1: movl %%eax,0(%1)\n"			\
		     "2: movl %%edx,4(%1)\n"			\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1: movl %%eax,0(%1)\n"			\
		     "2: movl %%edx,4(%1)\n"			\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
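
/*
 * Usage sketch (hypothetical caller, not part of this header): like
 * get_user(), put_user() does its own range check.
 *
 *	if (put_user(result, (int __user *)arg))	// result/arg are hypothetical
 *		return -EFAULT;
 */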

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1: movl %2,%%eax\n"				\
		     "2: movl %3,%%edx\n"				\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: mov %4,%0\n"					\
		     " xorl %%eax,%%eax\n"				\
		     " xorl %%edx,%%edx\n"				\
		     " jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " xor"itype" %"rtype"1,%"rtype"1\n"		\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1: mov"itype" %2,%"rtype"1\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %3,%0\n"					\
		     " jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %1,%"rtype"0\n"			\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)		\
	asm_volatile_goto("\n"						\
		"1: mov"itype" %"rtype"0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x,addr,itype,rtype,ltype,__puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1: mov"itype" %"rtype"0,%1\n"			\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)
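
/*
 * Usage sketch (hypothetical, not part of this header): the try/catch pair
 * brackets a run of the *_ex() accessors defined further down and folds any
 * deferred fault into a single error code.
 *
 *	int err = 0;
 *
 *	uaccess_try;
 *	put_user_ex(value, &uptr->field);	// uptr and field are hypothetical
 *	uaccess_catch(err);
 *	if (err)
 *		return -EFAULT;
 */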

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 * enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
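
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * __-prefixed variants rely on an earlier access_ok() over the whole range.
 *
 *	u32 val;
 *
 *	if (!access_ok(uptr, 2 * sizeof(u32)))	// uptr is a hypothetical u32 __user *
 *		return -EFAULT;
 *	if (__get_user(val, &uptr[0]))
 *		return -EFAULT;
 *	if (__put_user(val + 1, &uptr[1]))
 *		return -EFAULT;
 */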

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
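
/*
 * Concrete sketch of the pattern above (hypothetical sigframe layout, not
 * part of this header): faults inside the block are deferred and reported
 * once through the catch clause.
 *
 *	int err = 0;
 *
 *	get_user_try {
 *		get_user_ex(regs->ax, &frame->ax);	// frame is hypothetical
 *		get_user_ex(regs->ip, &frame->ip);
 *	} get_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */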

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*(uval) = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
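
/*
 * Usage sketch (hypothetical futex-style update, not part of this header):
 * returns 0 and stores the previous value in *uval, or -EFAULT on a fault.
 *
 *	u32 prev;
 *
 *	if (user_atomic_cmpxchg_inatomic(&prev, uaddr, oldval, newval))
 *		return -EFAULT;		// uaddr, oldval, newval are hypothetical
 *	if (prev != oldval)
 *		return -EAGAIN;		// lost the race, caller retries
 */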

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
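
/*
 * Usage sketch (hypothetical caller, not part of this header): the unsafe
 * accessors must be bracketed by user_access_begin()/user_access_end(), and
 * the fault label must also end the bracketed region before returning.
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))	// uptr is hypothetical
 *		return -EFAULT;
 *	unsafe_put_user(val, &uptr->field, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */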

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)src,(type __user *)dst,label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)

#endif /* _ASM_X86_UACCESS_H */