arch/mips/include/asm/uaccess.h
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/thread_info.h>
17 #include <asm/asm-eva.h>
18
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed; with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 #ifdef CONFIG_32BIT
27
28 #ifdef CONFIG_KVM_GUEST
29 #define __UA_LIMIT 0x40000000UL
30 #else
31 #define __UA_LIMIT 0x80000000UL
32 #endif
33
34 #define __UA_ADDR       ".word"
35 #define __UA_LA         "la"
36 #define __UA_ADDU       "addu"
37 #define __UA_t0         "$8"
38 #define __UA_t1         "$9"
39
40 #endif /* CONFIG_32BIT */
41
42 #ifdef CONFIG_64BIT
43
44 extern u64 __ua_limit;
45
46 #define __UA_LIMIT      __ua_limit
47
48 #define __UA_ADDR       ".dword"
49 #define __UA_LA         "dla"
50 #define __UA_ADDU       "daddu"
51 #define __UA_t0         "$12"
52 #define __UA_t1         "$13"
53
54 #endif /* CONFIG_64BIT */
55
56 /*
57  * USER_DS is a bitmask that has the bits set that may not be set in a valid
58  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
59  * the arithmetic we're doing only works if the limit is a power of two, so
60  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
61  * address in this range it's the process's problem, not ours :-)
62  */
63
64 #ifdef CONFIG_KVM_GUEST
65 #define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
66 #define USER_DS         ((mm_segment_t) { 0xC0000000UL })
67 #else
68 #define KERNEL_DS       ((mm_segment_t) { 0UL })
69 #define USER_DS         ((mm_segment_t) { __UA_LIMIT })
70 #endif
71
72 #define VERIFY_READ    0
73 #define VERIFY_WRITE   1
74
75 #define get_ds()        (KERNEL_DS)
76 #define get_fs()        (current_thread_info()->addr_limit)
77 #define set_fs(x)       (current_thread_info()->addr_limit = (x))
78
79 #define segment_eq(a, b)        ((a).seg == (b).seg)
80
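/*
 * Usage sketch (illustrative only, not part of this header): the classic
 * pattern for temporarily widening the address limit so a kernel buffer can
 * be handed to a user-access helper.  The helper name below is hypothetical;
 * only get_fs()/set_fs()/KERNEL_DS come from this file.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);			// bypass the USER_DS range check
 *	err = do_uaccess_on(kernel_buf);	// hypothetical helper
 *	set_fs(old_fs);				// always restore the previous limit
 */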
81
82 /*
83  * Is an address valid? This does a straightforward calculation rather
84  * than tests.
85  *
86  * Address valid if:
87  *  - "addr" doesn't have any high-bits set
88  *  - AND "size" doesn't have any high-bits set
89  *  - AND "addr+size" doesn't have any high-bits set
90  *  - OR we are in kernel mode.
91  *
92  * __ua_size() is a trick to avoid runtime checking of positive constant
93  * sizes; for those we already know at compile time that the size is ok.
94  */
95 #define __ua_size(size)                                                 \
96         ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
97
98 /*
99  * access_ok: - Checks if a user space pointer is valid
100  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
101  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
102  *        to write to a block, it is always safe to read from it.
103  * @addr: User space pointer to start of block to check
104  * @size: Size of block to check
105  *
106  * Context: User context only.  This function may sleep.
107  *
108  * Checks if a pointer to a block of memory in user space is valid.
109  *
110  * Returns true (nonzero) if the memory block may be valid, false (zero)
111  * if it is definitely invalid.
112  *
113  * Note that, depending on architecture, this function probably just
114  * checks that the pointer is in the user space range - after calling
115  * this function, memory access functions may still return -EFAULT.
116  */
117
118 #define __access_mask get_fs().seg
119
120 #define __access_ok(addr, size, mask)                                   \
121 ({                                                                      \
122         unsigned long __addr = (unsigned long) (addr);                  \
123         unsigned long __size = size;                                    \
124         unsigned long __mask = mask;                                    \
125         unsigned long __ok;                                             \
126                                                                         \
127         __chk_user_ptr(addr);                                           \
128         __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
129                 __ua_size(__size)));                                    \
130         __ok == 0;                                                      \
131 })
132
133 #define access_ok(type, addr, size)                                     \
134         likely(__access_ok((addr), (size), __access_mask))
135
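/*
 * Usage sketch (illustrative only): validate a user range once, then use the
 * unchecked accessors on it.  The function below is hypothetical; note that
 * even after access_ok() succeeds, the accessors may still return -EFAULT.
 *
 *	static int read_pair(int __user *uaddr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uaddr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		if (__get_user(*a, uaddr) || __get_user(*b, uaddr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */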
136 /*
137  * put_user: - Write a simple value into user space.
138  * @x:   Value to copy to user space.
139  * @ptr: Destination address, in user space.
140  *
141  * Context: User context only.  This function may sleep.
142  *
143  * This macro copies a single simple value from kernel space to user
144  * space.  It supports simple types like char and int, but not larger
145  * data types like structures or arrays.
146  *
147  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
148  * to the result of dereferencing @ptr.
149  *
150  * Returns zero on success, or -EFAULT on error.
151  */
152 #define put_user(x,ptr) \
153         __put_user_check((x), (ptr), sizeof(*(ptr)))
154
155 /*
156  * get_user: - Get a simple variable from user space.
157  * @x:   Variable to store result.
158  * @ptr: Source address, in user space.
159  *
160  * Context: User context only.  This function may sleep.
161  *
162  * This macro copies a single simple variable from user space to kernel
163  * space.  It supports simple types like char and int, but not larger
164  * data types like structures or arrays.
165  *
166  * @ptr must have pointer-to-simple-variable type, and the result of
167  * dereferencing @ptr must be assignable to @x without a cast.
168  *
169  * Returns zero on success, or -EFAULT on error.
170  * On error, the variable @x is set to zero.
171  */
172 #define get_user(x,ptr) \
173         __get_user_check((x), (ptr), sizeof(*(ptr)))
174
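/*
 * Usage sketch (illustrative only, hypothetical ioctl-style handler): the
 * checking variants validate the pointer themselves, so no separate
 * access_ok() call is needed.  FOO_VERSION is a made-up constant.
 *
 *	static long foo_get_version(int __user *argp)
 *	{
 *		int req;
 *
 *		if (get_user(req, argp))		// fetch the request code
 *			return -EFAULT;
 *		return put_user(FOO_VERSION, argp);	// write the reply back
 *	}
 */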
175 /*
176  * __put_user: - Write a simple value into user space, with less checking.
177  * @x:   Value to copy to user space.
178  * @ptr: Destination address, in user space.
179  *
180  * Context: User context only.  This function may sleep.
181  *
182  * This macro copies a single simple value from kernel space to user
183  * space.  It supports simple types like char and int, but not larger
184  * data types like structures or arrays.
185  *
186  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
187  * to the result of dereferencing @ptr.
188  *
189  * Caller must check the pointer with access_ok() before calling this
190  * function.
191  *
192  * Returns zero on success, or -EFAULT on error.
193  */
194 #define __put_user(x,ptr) \
195         __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
196
197 /*
198  * __get_user: - Get a simple variable from user space, with less checking.
199  * @x:   Variable to store result.
200  * @ptr: Source address, in user space.
201  *
202  * Context: User context only.  This function may sleep.
203  *
204  * This macro copies a single simple variable from user space to kernel
205  * space.  It supports simple types like char and int, but not larger
206  * data types like structures or arrays.
207  *
208  * @ptr must have pointer-to-simple-variable type, and the result of
209  * dereferencing @ptr must be assignable to @x without a cast.
210  *
211  * Caller must check the pointer with access_ok() before calling this
212  * function.
213  *
214  * Returns zero on success, or -EFAULT on error.
215  * On error, the variable @x is set to zero.
216  */
217 #define __get_user(x,ptr) \
218         __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
219
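/*
 * Usage sketch (illustrative only): with the range checked up front, the
 * "__" variants avoid a per-element access_ok() inside a loop.  The kvec,
 * uvec and count names below are hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, uvec, count * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < count; i++)
 *		if (__put_user(kvec[i], uvec + i))
 *			return -EFAULT;
 */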
220 struct __large_struct { unsigned long buf[100]; };
221 #define __m(x) (*(struct __large_struct __user *)(x))
222
223 /*
224  * Yuck.  We need two variants, one for 64bit operation and one
225  * for 32 bit mode and old iron.
226  */
227 #ifndef CONFIG_EVA
228 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
229 #else
230 /*
231  * Kernel specific functions for EVA. We need to use normal load instructions
232  * to read data from kernel when operating in EVA mode. We use these macros to
233  * avoid redefining __get_user_asm for EVA.
234  */
235 #undef _loadd
236 #undef _loadw
237 #undef _loadh
238 #undef _loadb
239 #ifdef CONFIG_32BIT
240 #define _loadd                  _loadw
241 #else
242 #define _loadd(reg, addr)       "ld " reg ", " addr
243 #endif
244 #define _loadw(reg, addr)       "lw " reg ", " addr
245 #define _loadh(reg, addr)       "lh " reg ", " addr
246 #define _loadb(reg, addr)       "lb " reg ", " addr
247
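/*
 * Editor's note: each _load*() macro just pastes together an instruction
 * string, so __get_kernel_common() below can reuse the __get_data_asm()
 * template with an ordinary kernel load instead of the EVA user_l* form,
 * e.g.
 *
 *	_loadw("%1", "%3")   expands to   "lw %1, %3"
 */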
248 #define __get_kernel_common(val, size, ptr)                             \
249 do {                                                                    \
250         switch (size) {                                                 \
251         case 1: __get_data_asm(val, _loadb, ptr); break;                \
252         case 2: __get_data_asm(val, _loadh, ptr); break;                \
253         case 4: __get_data_asm(val, _loadw, ptr); break;                \
254         case 8: __GET_DW(val, _loadd, ptr); break;                      \
255         default: __get_user_unknown(); break;                           \
256         }                                                               \
257 } while (0)
258 #endif
259
260 #ifdef CONFIG_32BIT
261 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
262 #endif
263 #ifdef CONFIG_64BIT
264 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
265 #endif
266
267 extern void __get_user_unknown(void);
268
269 #define __get_user_common(val, size, ptr)                               \
270 do {                                                                    \
271         switch (size) {                                                 \
272         case 1: __get_data_asm(val, user_lb, ptr); break;               \
273         case 2: __get_data_asm(val, user_lh, ptr); break;               \
274         case 4: __get_data_asm(val, user_lw, ptr); break;               \
275         case 8: __GET_DW(val, user_ld, ptr); break;                     \
276         default: __get_user_unknown(); break;                           \
277         }                                                               \
278 } while (0)
279
280 #define __get_user_nocheck(x, ptr, size)                                \
281 ({                                                                      \
282         int __gu_err;                                                   \
283                                                                         \
284         if (segment_eq(get_fs(), get_ds())) {                           \
285                 __get_kernel_common((x), size, ptr);                    \
286         } else {                                                        \
287                 __chk_user_ptr(ptr);                                    \
288                 __get_user_common((x), size, ptr);                      \
289         }                                                               \
290         __gu_err;                                                       \
291 })
292
293 #define __get_user_check(x, ptr, size)                                  \
294 ({                                                                      \
295         int __gu_err = -EFAULT;                                         \
296         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
297                                                                         \
298         might_fault();                                                  \
299         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
300                 if (segment_eq(get_fs(), get_ds()))                     \
301                         __get_kernel_common((x), size, __gu_ptr);       \
302                 else                                                    \
303                         __get_user_common((x), size, __gu_ptr);         \
304         }                                                               \
305                                                                         \
306         __gu_err;                                                       \
307 })
308
309 #define __get_data_asm(val, insn, addr)                                 \
310 {                                                                       \
311         long __gu_tmp;                                                  \
312                                                                         \
313         __asm__ __volatile__(                                           \
314         "1:     "insn("%1", "%3")"                              \n"     \
315         "2:                                                     \n"     \
316         "       .insn                                           \n"     \
317         "       .section .fixup,\"ax\"                          \n"     \
318         "3:     li      %0, %4                                  \n"     \
319         "       j       2b                                      \n"     \
320         "       .previous                                       \n"     \
321         "       .section __ex_table,\"a\"                       \n"     \
322         "       "__UA_ADDR "\t1b, 3b                            \n"     \
323         "       .previous                                       \n"     \
324         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
325         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
326                                                                         \
327         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
328 }
329
330 /*
331  * Get a long long 64 using 32 bit registers.
332  */
333 #define __get_data_asm_ll32(val, insn, addr)                            \
334 {                                                                       \
335         union {                                                         \
336                 unsigned long long      l;                              \
337                 __typeof__(*(addr))     t;                              \
338         } __gu_tmp;                                                     \
339                                                                         \
340         __asm__ __volatile__(                                           \
341         "1:     " insn("%1", "(%3)")"                           \n"     \
342         "2:     " insn("%D1", "4(%3)")"                         \n"     \
343         "3:                                                     \n"     \
344         "       .insn                                           \n"     \
345         "       .section        .fixup,\"ax\"                   \n"     \
346         "4:     li      %0, %4                                  \n"     \
347         "       move    %1, $0                                  \n"     \
348         "       move    %D1, $0                                 \n"     \
349         "       j       3b                                      \n"     \
350         "       .previous                                       \n"     \
351         "       .section        __ex_table,\"a\"                \n"     \
352         "       " __UA_ADDR "   1b, 4b                          \n"     \
353         "       " __UA_ADDR "   2b, 4b                          \n"     \
354         "       .previous                                       \n"     \
355         : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
356         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
357                                                                         \
358         (val) = __gu_tmp.t;                                             \
359 }
360
361 #ifndef CONFIG_EVA
362 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
363 #else
364 /*
365  * Kernel specific functions for EVA. We need to use normal store instructions
366  * to write data to kernel memory when operating in EVA mode. We use these
367  * macros to avoid redefining __put_data_asm for EVA.
368  */
369 #undef _stored
370 #undef _storew
371 #undef _storeh
372 #undef _storeb
373 #ifdef CONFIG_32BIT
374 #define _stored                 _storew
375 #else
376 #define _stored(reg, addr)      "sd " reg ", " addr
377 #endif
378
379 #define _storew(reg, addr)      "sw " reg ", " addr
380 #define _storeh(reg, addr)      "sh " reg ", " addr
381 #define _storeb(reg, addr)      "sb " reg ", " addr
382
383 #define __put_kernel_common(ptr, size)                                  \
384 do {                                                                    \
385         switch (size) {                                                 \
386         case 1: __put_data_asm(_storeb, ptr); break;                    \
387         case 2: __put_data_asm(_storeh, ptr); break;                    \
388         case 4: __put_data_asm(_storew, ptr); break;                    \
389         case 8: __PUT_DW(_stored, ptr); break;                          \
390         default: __put_user_unknown(); break;                           \
391         }                                                               \
392 } while(0)
393 #endif
394
395 /*
396  * Yuck.  We need two variants, one for 64bit operation and one
397  * for 32 bit mode and old iron.
398  */
399 #ifdef CONFIG_32BIT
400 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
401 #endif
402 #ifdef CONFIG_64BIT
403 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
404 #endif
405
406 #define __put_user_common(ptr, size)                                    \
407 do {                                                                    \
408         switch (size) {                                                 \
409         case 1: __put_data_asm(user_sb, ptr); break;                    \
410         case 2: __put_data_asm(user_sh, ptr); break;                    \
411         case 4: __put_data_asm(user_sw, ptr); break;                    \
412         case 8: __PUT_DW(user_sd, ptr); break;                          \
413         default: __put_user_unknown(); break;                           \
414         }                                                               \
415 } while (0)
416
417 #define __put_user_nocheck(x, ptr, size)                                \
418 ({                                                                      \
419         __typeof__(*(ptr)) __pu_val;                                    \
420         int __pu_err = 0;                                               \
421                                                                         \
422         __pu_val = (x);                                                 \
423         if (segment_eq(get_fs(), get_ds())) {                           \
424                 __put_kernel_common(ptr, size);                         \
425         } else {                                                        \
426                 __chk_user_ptr(ptr);                                    \
427                 __put_user_common(ptr, size);                           \
428         }                                                               \
429         __pu_err;                                                       \
430 })
431
432 #define __put_user_check(x, ptr, size)                                  \
433 ({                                                                      \
434         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
435         __typeof__(*(ptr)) __pu_val = (x);                              \
436         int __pu_err = -EFAULT;                                         \
437                                                                         \
438         might_fault();                                                  \
439         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
440                 if (segment_eq(get_fs(), get_ds()))                     \
441                         __put_kernel_common(__pu_addr, size);           \
442                 else                                                    \
443                         __put_user_common(__pu_addr, size);             \
444         }                                                               \
445                                                                         \
446         __pu_err;                                                       \
447 })
448
449 #define __put_data_asm(insn, ptr)                                       \
450 {                                                                       \
451         __asm__ __volatile__(                                           \
452         "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
453         "2:                                                     \n"     \
454         "       .insn                                           \n"     \
455         "       .section        .fixup,\"ax\"                   \n"     \
456         "3:     li      %0, %4                                  \n"     \
457         "       j       2b                                      \n"     \
458         "       .previous                                       \n"     \
459         "       .section        __ex_table,\"a\"                \n"     \
460         "       " __UA_ADDR "   1b, 3b                          \n"     \
461         "       .previous                                       \n"     \
462         : "=r" (__pu_err)                                               \
463         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
464           "i" (-EFAULT));                                               \
465 }
466
467 #define __put_data_asm_ll32(insn, ptr)                                  \
468 {                                                                       \
469         __asm__ __volatile__(                                           \
470         "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
471         "2:     "insn("%D2", "4(%3)")"                          \n"     \
472         "3:                                                     \n"     \
473         "       .insn                                           \n"     \
474         "       .section        .fixup,\"ax\"                   \n"     \
475         "4:     li      %0, %4                                  \n"     \
476         "       j       3b                                      \n"     \
477         "       .previous                                       \n"     \
478         "       .section        __ex_table,\"a\"                \n"     \
479         "       " __UA_ADDR "   1b, 4b                          \n"     \
480         "       " __UA_ADDR "   2b, 4b                          \n"     \
481         "       .previous"                                              \
482         : "=r" (__pu_err)                                               \
483         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
484           "i" (-EFAULT));                                               \
485 }
486
487 extern void __put_user_unknown(void);
488
489 /*
490  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
491  * EVA unaligned access is handled in the ADE exception handler.
492  */
493 #ifndef CONFIG_EVA
494 /*
495  * put_user_unaligned: - Write a simple value into user space.
496  * @x:   Value to copy to user space.
497  * @ptr: Destination address, in user space.
498  *
499  * Context: User context only.  This function may sleep.
500  *
501  * This macro copies a single simple value from kernel space to user
502  * space.  It supports simple types like char and int, but not larger
503  * data types like structures or arrays.
504  *
505  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
506  * to the result of dereferencing @ptr.
507  *
508  * Returns zero on success, or -EFAULT on error.
509  */
510 #define put_user_unaligned(x,ptr)       \
511         __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
512
513 /*
514  * get_user_unaligned: - Get a simple variable from user space.
515  * @x:   Variable to store result.
516  * @ptr: Source address, in user space.
517  *
518  * Context: User context only.  This function may sleep.
519  *
520  * This macro copies a single simple variable from user space to kernel
521  * space.  It supports simple types like char and int, but not larger
522  * data types like structures or arrays.
523  *
524  * @ptr must have pointer-to-simple-variable type, and the result of
525  * dereferencing @ptr must be assignable to @x without a cast.
526  *
527  * Returns zero on success, or -EFAULT on error.
528  * On error, the variable @x is set to zero.
529  */
530 #define get_user_unaligned(x,ptr) \
531         __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
532
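/*
 * Usage sketch (illustrative only): fetching a 32-bit field that may be only
 * 16-bit aligned, e.g. from a hypothetical packed on-wire header, using the
 * unaligned-safe accessor instead of relying on the address-error fixup path.
 *
 *	struct foo_hdr { __u16 tag; __u32 len; } __packed;
 *
 *	static int read_len(struct foo_hdr __user *hdr, u32 *len)
 *	{
 *		return get_user_unaligned(*len, &hdr->len);
 *	}
 */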
533 /*
534  * __put_user_unaligned: - Write a simple value into user space, with less checking.
535  * @x:   Value to copy to user space.
536  * @ptr: Destination address, in user space.
537  *
538  * Context: User context only.  This function may sleep.
539  *
540  * This macro copies a single simple value from kernel space to user
541  * space.  It supports simple types like char and int, but not larger
542  * data types like structures or arrays.
543  *
544  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
545  * to the result of dereferencing @ptr.
546  *
547  * Caller must check the pointer with access_ok() before calling this
548  * function.
549  *
550  * Returns zero on success, or -EFAULT on error.
551  */
552 #define __put_user_unaligned(x,ptr) \
553         __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
554
555 /*
556  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
557  * @x:   Variable to store result.
558  * @ptr: Source address, in user space.
559  *
560  * Context: User context only.  This function may sleep.
561  *
562  * This macro copies a single simple variable from user space to kernel
563  * space.  It supports simple types like char and int, but not larger
564  * data types like structures or arrays.
565  *
566  * @ptr must have pointer-to-simple-variable type, and the result of
567  * dereferencing @ptr must be assignable to @x without a cast.
568  *
569  * Caller must check the pointer with access_ok() before calling this
570  * function.
571  *
572  * Returns zero on success, or -EFAULT on error.
573  * On error, the variable @x is set to zero.
574  */
575 #define __get_user_unaligned(x,ptr) \
576         __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
577
578 /*
579  * Yuck.  We need two variants, one for 64bit operation and one
580  * for 32 bit mode and old iron.
581  */
582 #ifdef CONFIG_32BIT
583 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
584         __get_user_unaligned_asm_ll32(val, ptr)
585 #endif
586 #ifdef CONFIG_64BIT
587 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
588         __get_user_unaligned_asm(val, "uld", ptr)
589 #endif
590
591 extern void __get_user_unaligned_unknown(void);
592
593 #define __get_user_unaligned_common(val, size, ptr)                     \
594 do {                                                                    \
595         switch (size) {                                                 \
596         case 1: __get_data_asm(val, "lb", ptr); break;                  \
597         case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;       \
598         case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;       \
599         case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
600         default: __get_user_unaligned_unknown(); break;                 \
601         }                                                               \
602 } while (0)
603
604 #define __get_user_unaligned_nocheck(x,ptr,size)                        \
605 ({                                                                      \
606         int __gu_err;                                                   \
607                                                                         \
608         __get_user_unaligned_common((x), size, ptr);                    \
609         __gu_err;                                                       \
610 })
611
612 #define __get_user_unaligned_check(x,ptr,size)                          \
613 ({                                                                      \
614         int __gu_err = -EFAULT;                                         \
615         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
616                                                                         \
617         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
618                 __get_user_unaligned_common((x), size, __gu_ptr);       \
619                                                                         \
620         __gu_err;                                                       \
621 })
622
623 #define __get_data_unaligned_asm(val, insn, addr)                       \
624 {                                                                       \
625         long __gu_tmp;                                                  \
626                                                                         \
627         __asm__ __volatile__(                                           \
628         "1:     " insn "        %1, %3                          \n"     \
629         "2:                                                     \n"     \
630         "       .insn                                           \n"     \
631         "       .section .fixup,\"ax\"                          \n"     \
632         "3:     li      %0, %4                                  \n"     \
633         "       j       2b                                      \n"     \
634         "       .previous                                       \n"     \
635         "       .section __ex_table,\"a\"                       \n"     \
636         "       "__UA_ADDR "\t1b, 3b                            \n"     \
637         "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
638         "       .previous                                       \n"     \
639         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
640         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
641                                                                         \
642         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
643 }
644
645 /*
646  * Get a long long 64 using 32 bit registers.
647  */
648 #define __get_user_unaligned_asm_ll32(val, addr)                        \
649 {                                                                       \
650         unsigned long long __gu_tmp;                                    \
651                                                                         \
652         __asm__ __volatile__(                                           \
653         "1:     ulw     %1, (%3)                                \n"     \
654         "2:     ulw     %D1, 4(%3)                              \n"     \
655         "       move    %0, $0                                  \n"     \
656         "3:                                                     \n"     \
657         "       .insn                                           \n"     \
658         "       .section        .fixup,\"ax\"                   \n"     \
659         "4:     li      %0, %4                                  \n"     \
660         "       move    %1, $0                                  \n"     \
661         "       move    %D1, $0                                 \n"     \
662         "       j       3b                                      \n"     \
663         "       .previous                                       \n"     \
664         "       .section        __ex_table,\"a\"                \n"     \
665         "       " __UA_ADDR "   1b, 4b                          \n"     \
666         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
667         "       " __UA_ADDR "   2b, 4b                          \n"     \
668         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
669         "       .previous                                       \n"     \
670         : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
671         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
672         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
673 }
674
675 /*
676  * Yuck.  We need two variants, one for 64bit operation and one
677  * for 32 bit mode and old iron.
678  */
679 #ifdef CONFIG_32BIT
680 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
681 #endif
682 #ifdef CONFIG_64BIT
683 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
684 #endif
685
686 #define __put_user_unaligned_common(ptr, size)                          \
687 do {                                                                    \
688         switch (size) {                                                 \
689         case 1: __put_data_asm("sb", ptr); break;                       \
690         case 2: __put_user_unaligned_asm("ush", ptr); break;            \
691         case 4: __put_user_unaligned_asm("usw", ptr); break;            \
692         case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
693         default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
694 } while (0)
695
696 #define __put_user_unaligned_nocheck(x,ptr,size)                        \
697 ({                                                                      \
698         __typeof__(*(ptr)) __pu_val;                                    \
699         int __pu_err = 0;                                               \
700                                                                         \
701         __pu_val = (x);                                                 \
702         __put_user_unaligned_common(ptr, size);                         \
703         __pu_err;                                                       \
704 })
705
706 #define __put_user_unaligned_check(x,ptr,size)                          \
707 ({                                                                      \
708         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
709         __typeof__(*(ptr)) __pu_val = (x);                              \
710         int __pu_err = -EFAULT;                                         \
711                                                                         \
712         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
713                 __put_user_unaligned_common(__pu_addr, size);           \
714                                                                         \
715         __pu_err;                                                       \
716 })
717
718 #define __put_user_unaligned_asm(insn, ptr)                             \
719 {                                                                       \
720         __asm__ __volatile__(                                           \
721         "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
722         "2:                                                     \n"     \
723         "       .insn                                           \n"     \
724         "       .section        .fixup,\"ax\"                   \n"     \
725         "3:     li      %0, %4                                  \n"     \
726         "       j       2b                                      \n"     \
727         "       .previous                                       \n"     \
728         "       .section        __ex_table,\"a\"                \n"     \
729         "       " __UA_ADDR "   1b, 3b                          \n"     \
730         "       .previous                                       \n"     \
731         : "=r" (__pu_err)                                               \
732         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
733           "i" (-EFAULT));                                               \
734 }
735
736 #define __put_user_unaligned_asm_ll32(ptr)                              \
737 {                                                                       \
738         __asm__ __volatile__(                                           \
739         "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
740         "2:     sw      %D2, 4(%3)                              \n"     \
741         "3:                                                     \n"     \
742         "       .insn                                           \n"     \
743         "       .section        .fixup,\"ax\"                   \n"     \
744         "4:     li      %0, %4                                  \n"     \
745         "       j       3b                                      \n"     \
746         "       .previous                                       \n"     \
747         "       .section        __ex_table,\"a\"                \n"     \
748         "       " __UA_ADDR "   1b, 4b                          \n"     \
749         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
750         "       " __UA_ADDR "   2b, 4b                          \n"     \
751         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
752         "       .previous"                                              \
753         : "=r" (__pu_err)                                               \
754         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
755           "i" (-EFAULT));                                               \
756 }
757
758 extern void __put_user_unaligned_unknown(void);
759 #endif
760
761 /*
762  * We're generating jump to subroutines which will be outside the range of
763  * jump instructions
764  */
765 #ifdef MODULE
766 #define __MODULE_JAL(destination)                                       \
767         ".set\tnoat\n\t"                                                \
768         __UA_LA "\t$1, " #destination "\n\t"                            \
769         "jalr\t$1\n\t"                                                  \
770         ".set\tat\n\t"
771 #else
772 #define __MODULE_JAL(destination)                                       \
773         "jal\t" #destination "\n\t"
774 #endif
775
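/*
 * Editor's note: for code built as a module, __MODULE_JAL(__copy_user)
 * expands roughly to
 *
 *	.set	noat
 *	la	$1, __copy_user		# dla on 64-bit kernels
 *	jalr	$1
 *	.set	at
 *
 * whereas built-in code can use a plain "jal __copy_user", since it is
 * linked within reach of the jal instruction's 256 MB region.
 */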
776 #ifndef CONFIG_CPU_DADDI_WORKAROUNDS
777 #define DADDI_SCRATCH "$0"
778 #else
779 #define DADDI_SCRATCH "$3"
780 #endif
781
782 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
783
784 #define __invoke_copy_to_user(to, from, n)                              \
785 ({                                                                      \
786         register void __user *__cu_to_r __asm__("$4");                  \
787         register const void *__cu_from_r __asm__("$5");                 \
788         register long __cu_len_r __asm__("$6");                         \
789                                                                         \
790         __cu_to_r = (to);                                               \
791         __cu_from_r = (from);                                           \
792         __cu_len_r = (n);                                               \
793         __asm__ __volatile__(                                           \
794         __MODULE_JAL(__copy_user)                                       \
795         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
796         :                                                               \
797         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
798           DADDI_SCRATCH, "memory");                                     \
799         __cu_len_r;                                                     \
800 })
801
802 /*
803  * __copy_to_user: - Copy a block of data into user space, with less checking.
804  * @to:   Destination address, in user space.
805  * @from: Source address, in kernel space.
806  * @n:    Number of bytes to copy.
807  *
808  * Context: User context only.  This function may sleep.
809  *
810  * Copy data from kernel space to user space.  Caller must check
811  * the specified block with access_ok() before calling this function.
812  *
813  * Returns number of bytes that could not be copied.
814  * On success, this will be zero.
815  */
816 #define __copy_to_user(to, from, n)                                     \
817 ({                                                                      \
818         void __user *__cu_to;                                           \
819         const void *__cu_from;                                          \
820         long __cu_len;                                                  \
821                                                                         \
822         __cu_to = (to);                                                 \
823         __cu_from = (from);                                             \
824         __cu_len = (n);                                                 \
825         might_fault();                                                  \
826         __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
827         __cu_len;                                                       \
828 })
829
830 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
831
832 #define __copy_to_user_inatomic(to, from, n)                            \
833 ({                                                                      \
834         void __user *__cu_to;                                           \
835         const void *__cu_from;                                          \
836         long __cu_len;                                                  \
837                                                                         \
838         __cu_to = (to);                                                 \
839         __cu_from = (from);                                             \
840         __cu_len = (n);                                                 \
841         __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
842         __cu_len;                                                       \
843 })
844
845 #define __copy_from_user_inatomic(to, from, n)                          \
846 ({                                                                      \
847         void *__cu_to;                                                  \
848         const void __user *__cu_from;                                   \
849         long __cu_len;                                                  \
850                                                                         \
851         __cu_to = (to);                                                 \
852         __cu_from = (from);                                             \
853         __cu_len = (n);                                                 \
854         __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
855                                                     __cu_len);          \
856         __cu_len;                                                       \
857 })
858
859 /*
860  * copy_to_user: - Copy a block of data into user space.
861  * @to:   Destination address, in user space.
862  * @from: Source address, in kernel space.
863  * @n:    Number of bytes to copy.
864  *
865  * Context: User context only.  This function may sleep.
866  *
867  * Copy data from kernel space to user space.
868  *
869  * Returns number of bytes that could not be copied.
870  * On success, this will be zero.
871  */
872 #define copy_to_user(to, from, n)                                       \
873 ({                                                                      \
874         void __user *__cu_to;                                           \
875         const void *__cu_from;                                          \
876         long __cu_len;                                                  \
877                                                                         \
878         __cu_to = (to);                                                 \
879         __cu_from = (from);                                             \
880         __cu_len = (n);                                                 \
881         if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {               \
882                 might_fault();                                          \
883                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
884                                                  __cu_len);             \
885         }                                                               \
886         __cu_len;                                                       \
887 })
888
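/*
 * Usage sketch (illustrative only, hypothetical character-device read()):
 * copy_to_user() does the access_ok() check itself and returns the number
 * of bytes that could NOT be copied.
 *
 *	static ssize_t foo_read(struct file *filp, char __user *buf,
 *				size_t count, loff_t *ppos)
 *	{
 *		static const char msg[] = "hello\n";
 *
 *		if (count > sizeof(msg))
 *			count = sizeof(msg);
 *		if (copy_to_user(buf, msg, count))
 *			return -EFAULT;
 *		return count;
 *	}
 */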
889 #define __invoke_copy_from_user(to, from, n)                            \
890 ({                                                                      \
891         register void *__cu_to_r __asm__("$4");                         \
892         register const void __user *__cu_from_r __asm__("$5");          \
893         register long __cu_len_r __asm__("$6");                         \
894                                                                         \
895         __cu_to_r = (to);                                               \
896         __cu_from_r = (from);                                           \
897         __cu_len_r = (n);                                               \
898         __asm__ __volatile__(                                           \
899         ".set\tnoreorder\n\t"                                           \
900         __MODULE_JAL(__copy_user)                                       \
901         ".set\tnoat\n\t"                                                \
902         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
903         ".set\tat\n\t"                                                  \
904         ".set\treorder"                                                 \
905         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
906         :                                                               \
907         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
908           DADDI_SCRATCH, "memory");                                     \
909         __cu_len_r;                                                     \
910 })
911
912 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
913 ({                                                                      \
914         register void *__cu_to_r __asm__("$4");                         \
915         register const void __user *__cu_from_r __asm__("$5");          \
916         register long __cu_len_r __asm__("$6");                         \
917                                                                         \
918         __cu_to_r = (to);                                               \
919         __cu_from_r = (from);                                           \
920         __cu_len_r = (n);                                               \
921         __asm__ __volatile__(                                           \
922         ".set\tnoreorder\n\t"                                           \
923         __MODULE_JAL(__copy_user_inatomic)                              \
924         ".set\tnoat\n\t"                                                \
925         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
926         ".set\tat\n\t"                                                  \
927         ".set\treorder"                                                 \
928         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
929         :                                                               \
930         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
931           DADDI_SCRATCH, "memory");                                     \
932         __cu_len_r;                                                     \
933 })
934
935 /*
936  * __copy_from_user: - Copy a block of data from user space, with less checking.
937  * @to:   Destination address, in kernel space.
938  * @from: Source address, in user space.
939  * @n:    Number of bytes to copy.
940  *
941  * Context: User context only.  This function may sleep.
942  *
943  * Copy data from user space to kernel space.  Caller must check
944  * the specified block with access_ok() before calling this function.
945  *
946  * Returns number of bytes that could not be copied.
947  * On success, this will be zero.
948  *
949  * If some data could not be copied, this function will pad the copied
950  * data to the requested size using zero bytes.
951  */
952 #define __copy_from_user(to, from, n)                                   \
953 ({                                                                      \
954         void *__cu_to;                                                  \
955         const void __user *__cu_from;                                   \
956         long __cu_len;                                                  \
957                                                                         \
958         __cu_to = (to);                                                 \
959         __cu_from = (from);                                             \
960         __cu_len = (n);                                                 \
961         might_fault();                                                  \
962         __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
963                                            __cu_len);                   \
964         __cu_len;                                                       \
965 })
966
967 /*
968  * copy_from_user: - Copy a block of data from user space.
969  * @to:   Destination address, in kernel space.
970  * @from: Source address, in user space.
971  * @n:    Number of bytes to copy.
972  *
973  * Context: User context only.  This function may sleep.
974  *
975  * Copy data from user space to kernel space.
976  *
977  * Returns number of bytes that could not be copied.
978  * On success, this will be zero.
979  *
980  * If some data could not be copied, this function will pad the copied
981  * data to the requested size using zero bytes.
982  */
983 #define copy_from_user(to, from, n)                                     \
984 ({                                                                      \
985         void *__cu_to;                                                  \
986         const void __user *__cu_from;                                   \
987         long __cu_len;                                                  \
988                                                                         \
989         __cu_to = (to);                                                 \
990         __cu_from = (from);                                             \
991         __cu_len = (n);                                                 \
992         if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {              \
993                 might_fault();                                          \
994                 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
995                                                    __cu_len);           \
996         }                                                               \
997         __cu_len;                                                       \
998 })
999
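/*
 * Usage sketch (illustrative only, hypothetical write() method): the mirror
 * image of the read() example above; a non-zero return from copy_from_user()
 * means some bytes were left uncopied.
 *
 *	static ssize_t foo_write(struct file *filp, const char __user *buf,
 *				 size_t count, loff_t *ppos)
 *	{
 *		char kbuf[64];
 *
 *		if (count > sizeof(kbuf))
 *			count = sizeof(kbuf);
 *		if (copy_from_user(kbuf, buf, count))
 *			return -EFAULT;
 *		return count;
 *	}
 */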
1000 #define __copy_in_user(to, from, n)                                     \
1001 ({                                                                      \
1002         void __user *__cu_to;                                           \
1003         const void __user *__cu_from;                                   \
1004         long __cu_len;                                                  \
1005                                                                         \
1006         __cu_to = (to);                                                 \
1007         __cu_from = (from);                                             \
1008         __cu_len = (n);                                                 \
1009         might_fault();                                                  \
1010         __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,          \
1011                                            __cu_len);                   \
1012         __cu_len;                                                       \
1013 })
1014
1015 #define copy_in_user(to, from, n)                                       \
1016 ({                                                                      \
1017         void __user *__cu_to;                                           \
1018         const void __user *__cu_from;                                   \
1019         long __cu_len;                                                  \
1020                                                                         \
1021         __cu_to = (to);                                                 \
1022         __cu_from = (from);                                             \
1023         __cu_len = (n);                                                 \
1024         if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&       \
1025                    access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {       \
1026                 might_fault();                                          \
1027                 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
1028                                                    __cu_len);           \
1029         }                                                               \
1030         __cu_len;                                                       \
1031 })
1032
1033 /*
1034  * __clear_user: - Zero a block of memory in user space, with less checking.
1035  * @to:   Destination address, in user space.
1036  * @n:    Number of bytes to zero.
1037  *
1038  * Zero a block of memory in user space.  Caller must check
1039  * the specified block with access_ok() before calling this function.
1040  *
1041  * Returns number of bytes that could not be cleared.
1042  * On success, this will be zero.
1043  */
1044 static inline __kernel_size_t
1045 __clear_user(void __user *addr, __kernel_size_t size)
1046 {
1047         __kernel_size_t res;
1048
1049         might_fault();
1050         __asm__ __volatile__(
1051                 "move\t$4, %1\n\t"
1052                 "move\t$5, $0\n\t"
1053                 "move\t$6, %2\n\t"
1054                 __MODULE_JAL(__bzero)
1055                 "move\t%0, $6"
1056                 : "=r" (res)
1057                 : "r" (addr), "r" (size)
1058                 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1059
1060         return res;
1061 }
1062
1063 #define clear_user(addr,n)                                              \
1064 ({                                                                      \
1065         void __user * __cl_addr = (addr);                               \
1066         unsigned long __cl_size = (n);                                  \
1067         if (__cl_size && access_ok(VERIFY_WRITE,                        \
1068                                         __cl_addr, __cl_size))          \
1069                 __cl_size = __clear_user(__cl_addr, __cl_size);         \
1070         __cl_size;                                                      \
1071 })
1072
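/*
 * Usage sketch (illustrative only): zero the unused tail of a user buffer,
 * e.g. after copying out a structure smaller than what user space supplied.
 * The "ubuf"/"used"/"total" names are hypothetical.
 *
 *	if (used < total && clear_user(ubuf + used, total - used))
 *		return -EFAULT;
 */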
1073 /*
1074  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
1075  * @dst:   Destination address, in kernel space.  This buffer must be at
1076  *         least @count bytes long.
1077  * @src:   Source address, in user space.
1078  * @count: Maximum number of bytes to copy, including the trailing NUL.
1079  *
1080  * Copies a NUL-terminated string from userspace to kernel space.
1081  * Caller must check the specified block with access_ok() before calling
1082  * this function.
1083  *
1084  * On success, returns the length of the string (not including the trailing
1085  * NUL).
1086  *
1087  * If access to userspace fails, returns -EFAULT (some data may have been
1088  * copied).
1089  *
1090  * If @count is smaller than the length of the string, copies @count bytes
1091  * and returns @count.
1092  */
1093 static inline long
1094 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1095 {
1096         long res;
1097
1098         might_fault();
1099         __asm__ __volatile__(
1100                 "move\t$4, %1\n\t"
1101                 "move\t$5, %2\n\t"
1102                 "move\t$6, %3\n\t"
1103                 __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1104                 "move\t%0, $2"
1105                 : "=r" (res)
1106                 : "r" (__to), "r" (__from), "r" (__len)
1107                 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1108
1109         return res;
1110 }
1111
1112 /*
1113  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1114  * @dst:   Destination address, in kernel space.  This buffer must be at
1115  *         least @count bytes long.
1116  * @src:   Source address, in user space.
1117  * @count: Maximum number of bytes to copy, including the trailing NUL.
1118  *
1119  * Copies a NUL-terminated string from userspace to kernel space.
1120  *
1121  * On success, returns the length of the string (not including the trailing
1122  * NUL).
1123  *
1124  * If access to userspace fails, returns -EFAULT (some data may have been
1125  * copied).
1126  *
1127  * If @count is smaller than the length of the string, copies @count bytes
1128  * and returns @count.
1129  */
1130 static inline long
1131 strncpy_from_user(char *__to, const char __user *__from, long __len)
1132 {
1133         long res;
1134
1135         might_fault();
1136         __asm__ __volatile__(
1137                 "move\t$4, %1\n\t"
1138                 "move\t$5, %2\n\t"
1139                 "move\t$6, %3\n\t"
1140                 __MODULE_JAL(__strncpy_from_user_asm)
1141                 "move\t%0, $2"
1142                 : "=r" (res)
1143                 : "r" (__to), "r" (__from), "r" (__len)
1144                 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1145
1146         return res;
1147 }
1148
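/*
 * Usage sketch (illustrative only): copy a user-supplied name into a fixed
 * kernel buffer.  Per the semantics above, a return value equal to the
 * buffer size means the string was truncated (and is not NUL-terminated).
 *
 *	char name[32];
 *	long len;
 *
 *	len = strncpy_from_user(name, uname, sizeof(name));
 *	if (len < 0)
 *		return len;		// -EFAULT
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;	// no room for the trailing NUL
 */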
1149 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1150 static inline long __strlen_user(const char __user *s)
1151 {
1152         long res;
1153
1154         might_fault();
1155         __asm__ __volatile__(
1156                 "move\t$4, %1\n\t"
1157                 __MODULE_JAL(__strlen_user_nocheck_asm)
1158                 "move\t%0, $2"
1159                 : "=r" (res)
1160                 : "r" (s)
1161                 : "$2", "$4", __UA_t0, "$31");
1162
1163         return res;
1164 }
1165
1166 /*
1167  * strlen_user: - Get the size of a string in user space.
1168  * @str: The string to measure.
1169  *
1170  * Context: User context only.  This function may sleep.
1171  *
1172  * Get the size of a NUL-terminated string in user space.
1173  *
1174  * Returns the size of the string INCLUDING the terminating NUL.
1175  * On exception, returns 0.
1176  *
1177  * If there is a limit on the length of a valid string, you may wish to
1178  * consider using strnlen_user() instead.
1179  */
1180 static inline long strlen_user(const char __user *s)
1181 {
1182         long res;
1183
1184         might_fault();
1185         __asm__ __volatile__(
1186                 "move\t$4, %1\n\t"
1187                 __MODULE_JAL(__strlen_user_asm)
1188                 "move\t%0, $2"
1189                 : "=r" (res)
1190                 : "r" (s)
1191                 : "$2", "$4", __UA_t0, "$31");
1192
1193         return res;
1194 }
1195
1196 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1197 static inline long __strnlen_user(const char __user *s, long n)
1198 {
1199         long res;
1200
1201         might_fault();
1202         __asm__ __volatile__(
1203                 "move\t$4, %1\n\t"
1204                 "move\t$5, %2\n\t"
1205                 __MODULE_JAL(__strnlen_user_nocheck_asm)
1206                 "move\t%0, $2"
1207                 : "=r" (res)
1208                 : "r" (s), "r" (n)
1209                 : "$2", "$4", "$5", __UA_t0, "$31");
1210
1211         return res;
1212 }
1213
1214 /*
1215  * strnlen_user: - Get the size of a string in user space, with a limit.
1216  * @str: The string to measure.
1217  * @n:   The maximum number of bytes to examine.
1218  *
1219  * Context: User context only.  This function may sleep.
1220  *
1221  * Get the size of a NUL-terminated string in user space, looking at no
1222  * more than @n bytes.
1223  *
1224  * Returns the size of the string INCLUDING the terminating NUL.
1225  * On exception, returns 0.
1226  *
1227  */
1228 static inline long strnlen_user(const char __user *s, long n)
1229 {
1230         long res;
1231
1232         might_fault();
1233         __asm__ __volatile__(
1234                 "move\t$4, %1\n\t"
1235                 "move\t$5, %2\n\t"
1236                 __MODULE_JAL(__strnlen_user_asm)
1237                 "move\t%0, $2"
1238                 : "=r" (res)
1239                 : "r" (s), "r" (n)
1240                 : "$2", "$4", "$5", __UA_t0, "$31");
1241
1242         return res;
1243 }
1244
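/*
 * Usage sketch (illustrative only): bound a user string before copying it.
 * FOO_MAX_NAME is a made-up limit; a zero return means the access faulted,
 * and a successful return counts the terminating NUL.
 *
 *	long len = strnlen_user(ustr, FOO_MAX_NAME);
 *
 *	if (!len)
 *		return -EFAULT;
 *	// "len" bytes (including the NUL) may now be copied, e.g. with
 *	// strncpy_from_user() or copy_from_user().
 */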
1245 struct exception_table_entry
1246 {
1247         unsigned long insn;
1248         unsigned long nextinsn;
1249 };
1250
1251 extern int fixup_exception(struct pt_regs *regs);
1252
1253 #endif /* _ASM_UACCESS_H */