arch/x86/include/asm/uaccess.h
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
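
/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * letting a __user-typed helper act on a kernel buffer is to widen the
 * limit around the call and always restore it afterwards:
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      err = helper_taking_user_ptr((void __user *)kbuf, len);
 *      set_fs(old_fs);
 *
 * (helper_taking_user_ptr() is a hypothetical name.)
 */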

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current_thread_info()->addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}
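
/*
 * Worked example of the wraparound the non-constant path guards against
 * (numbers are illustrative): with addr = 0xfffffffffffff000 and
 * size = 0x2000, addr + size wraps to 0x1000; the "addr < size" test
 * (0x1000 < 0x2000) catches exactly this case.
 */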

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        likely(!__range_not_ok(addr, size, user_addr_max()))
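
/*
 * Usage sketch (illustrative): validate a user pointer once, then use
 * the unchecked __get_user() variant declared further down:
 *
 *      if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      if (__get_user(val, uptr))
 *              return -EFAULT;
 */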

/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        int insn, fixup, handler;
};
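
/*
 * Because the fields are self-relative, the lookup code (see
 * arch/x86/mm/extable.c) recovers the absolute addresses along
 * these lines:
 *
 *      static inline unsigned long
 *      ex_fixup_addr(const struct exception_table_entry *x)
 *      {
 *              return (unsigned long)&x->fixup + x->fixup;
 *      }
 */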

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)                   \
        do {                                                    \
                (a)->fixup = (b)->fixup + (delta);              \
                (b)->fixup = (tmp).fixup - (delta);             \
                (a)->handler = (b)->handler + (delta);          \
                (b)->handler = (tmp).handler - (delta);         \
        } while (0)

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern int early_fixup_exception(unsigned long *ip);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
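
/*
 * For instance: on 64-bit, __inttype(u8) and __inttype(u64) are both
 * unsigned long; on 32-bit, __inttype(u32) is unsigned long while
 * __inttype(u64) is unsigned long long, so a 64-bit value still fits.
 */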

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        register void *__sp asm(_ASM_SP);                               \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)    \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})
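
/*
 * Usage sketch (illustrative; uptr is a hypothetical int __user *):
 *
 *      int val;
 *
 *      if (get_user(val, uptr))
 *              return -EFAULT;
 */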

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:"                                               \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                /* calls undefined __put_user_X: link-time error */ \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})
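
/*
 * Usage sketch (illustrative; uptr is a hypothetical int __user *):
 *
 *      if (put_user(val, uptr))
 *              return -EFAULT;
 */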

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)      (x) = __get_user_bad()
#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current_thread_info()->uaccess_err = 0;                         \
        __uaccess_begin();                                              \
        barrier();

#define uaccess_catch(err)                                              \
        __uaccess_end();                                                \
        (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
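
/*
 * Usage sketch (illustrative): one access_ok() check can cover several
 * unchecked accesses to the same user array:
 *
 *      if (!access_ok(VERIFY_READ, uarr, n * sizeof(*uarr)))
 *              return -EFAULT;
 *      for (i = 0; i < n; i++)
 *              if (__get_user(kbuf[i], uarr + i))
 *                      return -EFAULT;
 */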

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
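
/*
 * Concrete sketch of the try/catch style (modelled on the x86 signal
 * code; field names are illustrative):
 *
 *      int err = 0;
 *
 *      get_user_try {
 *              get_user_ex(regs->ax, &sc->ax);
 *              get_user_ex(regs->ip, &sc->ip);
 *      } get_user_catch(err);
 *
 *      return err;
 */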

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __uaccess_begin();                                              \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})
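
/*
 * Usage sketch (illustrative, futex-style): replace *uaddr with newval
 * only if it still holds oldval; curval receives the value observed:
 *
 *      u32 curval;
 *
 *      if (user_atomic_cmpxchg_inatomic(&curval, uaddr, oldval, newval))
 *              return -EFAULT;
 *      if (curval != oldval)
 *              return -EAGAIN;
 */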

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
# define copy_user_diag __compiletime_error
#else
# define copy_user_diag __compiletime_warning
#endif

extern void copy_user_diag("copy_from_user() buffer size is too small")
copy_from_user_overflow(void);
extern void copy_user_diag("copy_to_user() buffer size is too small")
copy_to_user_overflow(void) __asm__("copy_from_user_overflow");

#undef copy_user_diag

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS

extern void
__compiletime_warning("copy_from_user() buffer size is not provably correct")
__copy_from_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_from_user_overflow(size, count) __copy_from_user_overflow()

extern void
__compiletime_warning("copy_to_user() buffer size is not provably correct")
__copy_to_user_overflow(void) __asm__("copy_from_user_overflow");
#define __copy_to_user_overflow(size, count) __copy_to_user_overflow()

#else

static inline void
__copy_from_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

#define __copy_to_user_overflow __copy_from_user_overflow

#endif

static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        /*
         * While we would like to have the compiler do the checking for us
         * even in the non-constant size case, any false positives there are
         * a problem (especially when DEBUG_STRICT_USER_COPY_CHECKS, but even
         * without - the [hopefully] dangerous looking nature of the warning
         * would make people go look at the respective call sites over and
         * over again just to find that there's no problem).
         *
         * And there are cases where it's just not realistic for the compiler
         * to prove the count to be in range. For example when multiple call
         * sites of a helper function - perhaps in different source files -
         * all do proper range checking, yet the helper function doesn't
         * do so again.
         *
         * Therefore limit the compile time checking to the constant size
         * case, and do only runtime checking for non-constant sizes.
         */

        if (likely(sz < 0 || sz >= n))
                n = _copy_from_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_from_user_overflow();
        else
                __copy_from_user_overflow(sz, n);

        return n;
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        might_fault();

        /* See the comment in copy_from_user() above. */
        if (likely(sz < 0 || sz >= n))
                n = _copy_to_user(to, from, n);
        else if (__builtin_constant_p(n))
                copy_to_user_overflow();
        else
                __copy_to_user_overflow(sz, n);

        return n;
}
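
/*
 * Usage sketch (illustrative): both helpers return the number of bytes
 * that could not be copied, so zero means success:
 *
 *      struct foo karg;
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 */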

#undef __copy_from_user_overflow
#undef __copy_to_user_overflow

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()     __uaccess_begin()
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr)                                         \
({                                                                      \
        int __pu_err;                                                   \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
        __builtin_expect(__pu_err, 0);                                  \
})

#define unsafe_get_user(x, ptr)                                         \
({                                                                      \
        int __gu_err;                                                   \
        unsigned long __gu_val;                                         \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})
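
/*
 * Usage sketch (illustrative): check once, open the access window, and
 * close it again on every path:
 *
 *      if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      user_access_begin();
 *      if (unsafe_put_user(val, uptr))
 *              err = -EFAULT;
 *      user_access_end();
 */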

#endif /* _ASM_X86_UACCESS_H */