fix minor infoleak in get_user_ex()
arch/x86/include/asm/uaccess.h (cascardo/linux.git)
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * if get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(-1UL)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current->thread.addr_limit)
#define set_fs(x)       (current->thread.addr_limit = (x))

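/*
 * Illustrative sketch (not part of the upstream header): the classic
 * save/override/restore pattern around set_fs(), used when a kernel
 * buffer has to be passed through an interface that expects a
 * __user pointer.  "old_fs" is a local name made up for this example.
 *
 *      mm_segment_t old_fs = get_fs();
 *      set_fs(KERNEL_DS);
 *      ...user-access calls on a kernel address...
 *      set_fs(old_fs);
 */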

#define segment_eq(a, b)        ((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr)         \
        ((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns false if the range is valid, true otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}
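
/*
 * Worked example (not part of the upstream header) of the wraparound
 * guarded against above: with addr == -1UL and size == 2, "addr + size"
 * wraps to 1, so a naive "addr + size <= limit" test would pass even
 * though the range is bogus; the "addr < size" test catches it.
 */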

#define __range_not_ok(addr, size, limit)                               \
({                                                                      \
        __chk_user_ptr(addr);                                           \
        __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) \
        likely(!__range_not_ok(addr, size, user_addr_max()))
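
/*
 * Illustrative sketch (not part of the upstream header): the usual
 * check-then-access pattern; "uptr" and "val" are names made up for
 * this example.
 *
 *      int val;
 *      if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      if (__get_user(val, uptr))
 *              return -EFAULT;
 */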

/*
 * The exception table consists of triples of addresses relative to the
 * exception table entry itself. The first address is of an instruction
 * that is allowed to fault, the second is the target at which the program
 * should continue. The third is a handler function to deal with the fault
 * caused by the instruction in the first field.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
        int insn, fixup, handler;
};

#define ARCH_HAS_RELATIVE_EXTABLE

#define swap_ex_entry_fixup(a, b, tmp, delta)                   \
        do {                                                    \
                (a)->fixup = (b)->fixup + (delta);              \
                (b)->fixup = (tmp).fixup - (delta);             \
                (a)->handler = (b)->handler + (delta);          \
                (b)->handler = (tmp).handler - (delta);         \
        } while (0)

extern int fixup_exception(struct pt_regs *regs, int trapnr);
extern bool ex_has_fault_handler(unsigned long ip);
extern void early_fixup_exception(struct pt_regs *regs, int trapnr);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
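
/*
 * Illustrative expansions (not part of the upstream header):
 * __inttype(char) and __inttype(int) are unsigned long, while
 * __inttype(u64) is unsigned long long on 32-bit kernels and
 * unsigned long on 64-bit kernels.
 */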

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)                                                \
({                                                                      \
        int __ret_gu;                                                   \
        register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);            \
        register void *__sp asm(_ASM_SP);                               \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
        asm volatile("call __get_user_%P4"                              \
                     : "=a" (__ret_gu), "=r" (__val_gu), "+r" (__sp)    \
                     : "0" (ptr), "i" (sizeof(*(ptr))));                \
        (x) = (__force __typeof__(*(ptr))) __val_gu;                    \
        __builtin_expect(__ret_gu, 0);                                  \
})
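
/*
 * Illustrative sketch (not part of the upstream header): fetching a
 * single value with the checking variant; "arg" is a made-up user
 * pointer for this example.
 *
 *      int val;
 *      if (get_user(val, (int __user *)arg))
 *              return -EFAULT;
 */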

#define __put_user_x(size, x, ptr, __ret_pu)                    \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
                     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)                        \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:"                                               \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        movl %3,%0\n"                           \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
                     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)                                  \
        asm volatile("\n"                                               \
                     "1:        movl %%eax,0(%1)\n"                     \
                     "2:        movl %%edx,4(%1)\n"                     \
                     "3:"                                               \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     _ASM_EXTABLE_EX(2b, 3b)                            \
                     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)                         \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
        __put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)  \
        __put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)                                        \
({                                                              \
        int __ret_pu;                                           \
        __typeof__(*(ptr)) __pu_val;                            \
        __chk_user_ptr(ptr);                                    \
        might_fault();                                          \
        __pu_val = x;                                           \
        switch (sizeof(*(ptr))) {                               \
        case 1:                                                 \
                __put_user_x(1, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 2:                                                 \
                __put_user_x(2, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 4:                                                 \
                __put_user_x(4, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        case 8:                                                 \
                __put_user_x8(__pu_val, ptr, __ret_pu);         \
                break;                                          \
        default:                                                \
                __put_user_x(X, __pu_val, ptr, __ret_pu);       \
                break;                                          \
        }                                                       \
        __builtin_expect(__ret_pu, 0);                          \
})
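
/*
 * Illustrative sketch (not part of the upstream header): storing a
 * single value with the checking variant; "uptr" is a made-up user
 * pointer for this example.
 *
 *      if (put_user(42, uptr))
 *              return -EFAULT;
 */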

#define __put_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
                                   errret);                             \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
                break;                                                  \
        case 2:                                                         \
                __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
                break;                                                  \
        case 4:                                                         \
                __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
                break;                                                  \
        case 8:                                                         \
                __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
        }                                                               \
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)                      \
({                                                                      \
        __typeof__(ptr) __ptr = (ptr);                                  \
        asm volatile(ASM_STAC "\n"                                      \
                     "1:        movl %2,%%eax\n"                        \
                     "2:        movl %3,%%edx\n"                        \
                     "3: " ASM_CLAC "\n"                                \
                     ".section .fixup,\"ax\"\n"                         \
                     "4:        mov %4,%0\n"                            \
                     "  xorl %%eax,%%eax\n"                             \
                     "  xorl %%edx,%%edx\n"                             \
                     "  jmp 3b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (retval), "=A"(x)                           \
                     : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
                       "i" (errret), "0" (retval));                     \
})

#define __get_user_asm_ex_u64(x, ptr)                   (x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
         __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)                   \
do {                                                                    \
        retval = 0;                                                     \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm(x, ptr, retval, "b", "b", "=q", errret); \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm(x, ptr, retval, "w", "w", "=r", errret); \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm(x, ptr, retval, "l", "k", "=r", errret); \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_u64(x, ptr, retval, errret);             \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %2,%"rtype"1\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  xor"itype" %"rtype"1,%"rtype"1\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)                                \
do {                                                                    \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
                __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
                break;                                                  \
        case 2:                                                         \
                __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
                break;                                                  \
        case 4:                                                         \
                __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
                break;                                                  \
        default:                                                        \
                (x) = __get_user_bad();                                 \
        }                                                               \
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:xor"itype" %"rtype"0,%"rtype"0\n"               \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE_EX(1b, 3b)                            \
                     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        int __pu_err;                                           \
        __uaccess_begin();                                      \
        __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
        __uaccess_end();                                        \
        __builtin_expect(__pu_err, 0);                          \
})

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
        __inttype(*(ptr)) __gu_val;                                     \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __builtin_expect(__gu_err, 0);                                  \
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)       \
        asm volatile("\n"                                               \
                     "1:        mov"itype" %"rtype"1,%2\n"              \
                     "2:\n"                                             \
                     ".section .fixup,\"ax\"\n"                         \
                     "3:        mov %3,%0\n"                            \
                     "  jmp 2b\n"                                       \
                     ".previous\n"                                      \
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                 \
        asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
                     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try     do {                                            \
        current->thread.uaccess_err = 0;                                \
        __uaccess_begin();                                              \
        barrier();

#define uaccess_catch(err)                                              \
        __uaccess_end();                                                \
        (err) |= (current->thread.uaccess_err ? -EFAULT : 0);           \
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)                                              \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)                                              \
        __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
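
/*
 * Illustrative sketch (not part of the upstream header): the intended
 * use of the non-checking variants - one access_ok() covering several
 * accesses to the same user buffer.  "u", "a" and "b" are names made
 * up for this example.
 *
 *      if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
 *              return -EFAULT;
 *      if (__put_user(a, &u->first) || __put_user(b, &u->second))
 *              return -EFAULT;
 */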

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *      get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try            uaccess_try
#define get_user_catch(err)     uaccess_catch(err)

#define get_user_ex(x, ptr)     do {                                    \
        unsigned long __gue_val;                                        \
        __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
        (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
} while (0)

#define put_user_try            uaccess_try
#define put_user_catch(err)     uaccess_catch(err)

#define put_user_ex(x, ptr)                                             \
        __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
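
/*
 * Illustrative sketch (not part of the upstream header), modeled on
 * the signal-frame setup code: batch several stores and collect one
 * error at the end.  "regs", "sc" and "err" are names made up for
 * this example.
 *
 *      int err = 0;
 *      put_user_try {
 *              put_user_ex(regs->ip, &sc->ip);
 *              put_user_ex(regs->sp, &sc->sp);
 *      } put_user_catch(err);
 */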

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
        __compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)       \
({                                                                      \
        int __ret = 0;                                                  \
        __typeof__(ptr) __uval = (uval);                                \
        __typeof__(*(ptr)) __old = (old);                               \
        __typeof__(*(ptr)) __new = (new);                               \
        __uaccess_begin();                                              \
        switch (size) {                                                 \
        case 1:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "q" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 2:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 4:                                                         \
        {                                                               \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        case 8:                                                         \
        {                                                               \
                if (!IS_ENABLED(CONFIG_X86_64))                         \
                        __cmpxchg_wrong_size();                         \
                                                                        \
                asm volatile("\n"                                       \
                        "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"          \
                        "2:\n"                                          \
                        "\t.section .fixup, \"ax\"\n"                   \
                        "3:\tmov     %3, %0\n"                          \
                        "\tjmp     2b\n"                                \
                        "\t.previous\n"                                 \
                        _ASM_EXTABLE(1b, 3b)                            \
                        : "+r" (__ret), "=a" (__old), "+m" (*(ptr))     \
                        : "i" (-EFAULT), "r" (__new), "1" (__old)       \
                        : "memory"                                      \
                );                                                      \
                break;                                                  \
        }                                                               \
        default:                                                        \
                __cmpxchg_wrong_size();                                 \
        }                                                               \
        __uaccess_end();                                                \
        *__uval = __old;                                                \
        __ret;                                                          \
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)               \
({                                                                      \
        access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ?                \
                __user_atomic_cmpxchg_inatomic((uval), (ptr),           \
                                (old), (new), sizeof(*(ptr))) :         \
                -EFAULT;                                                \
})
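
/*
 * Illustrative sketch (not part of the upstream header): futex-style
 * compare-and-swap on a user word; "uaddr", "old" and "new" are names
 * made up for this example.  On success, "cur" holds the value that
 * was observed at *uaddr (equal to "old" iff the swap happened).
 *
 *      u32 cur;
 *      if (user_atomic_cmpxchg_inatomic(&cur, uaddr, old, new))
 *              return -EFAULT;
 */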

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
        int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

unsigned long __must_check _copy_from_user(void *to, const void __user *from,
                                           unsigned n);
unsigned long __must_check _copy_to_user(void __user *to, const void *from,
                                         unsigned n);

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
        WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        int sz = __compiletime_object_size(to);

        might_fault();

        kasan_check_write(to, n);

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(to, n, false);
                n = _copy_from_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
        int sz = __compiletime_object_size(from);

        kasan_check_read(from, n);

        might_fault();

        if (likely(sz < 0 || sz >= n)) {
                check_object_size(from, n, true);
                n = _copy_to_user(to, from, n);
        } else if (!__builtin_constant_p(n))
                copy_user_overflow(sz, n);
        else
                __bad_copy_user();

        return n;
}
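
/*
 * Illustrative sketch (not part of the upstream header): copying a
 * fixed-size object in; note that the return value is the number of
 * bytes that could NOT be copied, so nonzero means failure.  "ubuf"
 * and "struct foo" are made up for this example.
 *
 *      struct foo kbuf;
 *      if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *              return -EFAULT;
 */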

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()     __uaccess_begin()
#define user_access_end()       __uaccess_end()

#define unsafe_put_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __pu_err;                                                           \
        __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
        if (unlikely(__pu_err)) goto err_label;                                 \
} while (0)

#define unsafe_get_user(x, ptr, err_label)                                      \
do {                                                                            \
        int __gu_err;                                                           \
        unsigned long __gu_val;                                                 \
        __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        if (unlikely(__gu_err)) goto err_label;                                 \
} while (0)
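
/*
 * Illustrative sketch (not part of the upstream header) of the
 * intended calling pattern; note that the error path must still call
 * user_access_end(), since unsafe_put_user() jumps to the label with
 * STAC in effect.  "uptr" and "val" are made up for this example.
 *
 *      if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
 *              return -EFAULT;
 *      user_access_begin();
 *      unsafe_put_user(val, uptr, efault);
 *      user_access_end();
 *      return 0;
 * efault:
 *      user_access_end();
 *      return -EFAULT;
 */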

#endif /* _ASM_X86_UACCESS_H */