1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/thread_info.h>
17 #include <linux/string.h>
18 #include <asm/asm-eva.h>
19 #include <asm/extable.h>
20
21 /*
22  * The fs value determines whether argument validity checking should be
23  * performed or not.  If get_fs() == USER_DS, checking is performed; if
24  * get_fs() == KERNEL_DS, checking is bypassed.
25  *
26  * For historical reasons, these macros are grossly misnamed.
27  */
28 #ifdef CONFIG_32BIT
29
30 #ifdef CONFIG_KVM_GUEST
31 #define __UA_LIMIT 0x40000000UL
32 #else
33 #define __UA_LIMIT 0x80000000UL
34 #endif
35
36 #define __UA_ADDR       ".word"
37 #define __UA_LA         "la"
38 #define __UA_ADDU       "addu"
39 #define __UA_t0         "$8"
40 #define __UA_t1         "$9"
41
42 #endif /* CONFIG_32BIT */
43
44 #ifdef CONFIG_64BIT
45
46 extern u64 __ua_limit;
47
48 #define __UA_LIMIT      __ua_limit
49
50 #define __UA_ADDR       ".dword"
51 #define __UA_LA         "dla"
52 #define __UA_ADDU       "daddu"
53 #define __UA_t0         "$12"
54 #define __UA_t1         "$13"
55
56 #endif /* CONFIG_64BIT */
57
58 /*
59  * USER_DS is a bitmask that has the bits set that may not be set in a valid
60  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
61  * the arithmetic we're doing only works if the limit is a power of two, so
62  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
63  * address in this range it's the process's problem, not ours :-)
64  */
65
66 #ifdef CONFIG_KVM_GUEST
67 #define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
68 #define USER_DS         ((mm_segment_t) { 0xC0000000UL })
69 #else
70 #define KERNEL_DS       ((mm_segment_t) { 0UL })
71 #define USER_DS         ((mm_segment_t) { __UA_LIMIT })
72 #endif
73
74 #define VERIFY_READ    0
75 #define VERIFY_WRITE   1
76
77 #define get_ds()        (KERNEL_DS)
78 #define get_fs()        (current_thread_info()->addr_limit)
79 #define set_fs(x)       (current_thread_info()->addr_limit = (x))
80
81 #define segment_eq(a, b)        ((a).seg == (b).seg)
82
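/*
 * Example (illustrative only, kernel_consumer() is a placeholder): code
 * that has to feed a kernel buffer through the user-access helpers
 * traditionally widens the address limit around the access and restores
 * it afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = kernel_consumer((void __user *)kbuf, len);
 *	set_fs(old_fs);
 *
 * With KERNEL_DS in force access_ok() accepts kernel addresses, and on
 * EVA systems this is exactly the state that eva_kernel_access() below
 * detects.
 */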
83 /*
84  * eva_kernel_access() - determine whether this is a kernel memory access on an EVA system
85  *
86  * Determines whether memory accesses should be performed to kernel memory
87  * on a system using Extended Virtual Addressing (EVA).
88  *
89  * Return: true if a kernel memory access on an EVA system, else false.
90  */
91 static inline bool eva_kernel_access(void)
92 {
93         if (!IS_ENABLED(CONFIG_EVA))
94                 return false;
95
96         return segment_eq(get_fs(), get_ds());
97 }
98
99 /*
100  * Is an address valid? This does a straightforward calculation rather
101  * than tests.
102  *
103  * Address valid if:
104  *  - "addr" doesn't have any high-bits set
105  *  - AND "size" doesn't have any high-bits set
106  *  - AND "addr+size" doesn't have any high-bits set
107  *  - OR we are in kernel mode.
108  *
109  * __ua_size() is a trick to avoid runtime checking of positive constant
110  * sizes; for those we already know at compile time that the size is ok.
111  */
112 #define __ua_size(size)                                                 \
113         ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
114
115 /*
116  * access_ok: - Checks if a user space pointer is valid
117  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
118  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
119  *        to write to a block, it is always safe to read from it.
120  * @addr: User space pointer to start of block to check
121  * @size: Size of block to check
122  *
123  * Context: User context only. This function may sleep if pagefaults are
124  *          enabled.
125  *
126  * Checks if a pointer to a block of memory in user space is valid.
127  *
128  * Returns true (nonzero) if the memory block may be valid, false (zero)
129  * if it is definitely invalid.
130  *
131  * Note that, depending on architecture, this function probably just
132  * checks that the pointer is in the user space range - after calling
133  * this function, memory access functions may still return -EFAULT.
134  */
135
136 #define __access_mask get_fs().seg
137
138 #define __access_ok(addr, size, mask)                                   \
139 ({                                                                      \
140         unsigned long __addr = (unsigned long) (addr);                  \
141         unsigned long __size = size;                                    \
142         unsigned long __mask = mask;                                    \
143         unsigned long __ok;                                             \
144                                                                         \
145         __chk_user_ptr(addr);                                           \
146         __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
147                 __ua_size(__size)));                                    \
148         __ok == 0;                                                      \
149 })
150
151 #define access_ok(type, addr, size)                                     \
152         likely(__access_ok((addr), (size), __access_mask))
153
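/*
 * Worked example (illustrative only) of the arithmetic above on a plain
 * 32-bit kernel, where __UA_LIMIT and therefore the USER_DS mask is
 * 0x80000000:
 *
 *	addr = 0x7fff0000, size = 0x00008000:
 *		addr | (addr + size) = 0x7fff0000 | 0x7fff8000 = 0x7fff8000
 *		masked with 0x80000000 gives 0, so access_ok() succeeds.
 *
 *	addr = 0x7fff0000, size = 0x00020000:
 *		addr + size = 0x80010000 has the top bit set, the mask
 *		catches it and access_ok() fails.
 *
 * A positive constant size is folded to 0 by __ua_size(), so only the
 * addr and addr + size terms are evaluated at run time.
 */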
154 /*
155  * put_user: - Write a simple value into user space.
156  * @x:   Value to copy to user space.
157  * @ptr: Destination address, in user space.
158  *
159  * Context: User context only. This function may sleep if pagefaults are
160  *          enabled.
161  *
162  * This macro copies a single simple value from kernel space to user
163  * space.  It supports simple types like char and int, but not larger
164  * data types like structures or arrays.
165  *
166  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
167  * to the result of dereferencing @ptr.
168  *
169  * Returns zero on success, or -EFAULT on error.
170  */
171 #define put_user(x,ptr) \
172         __put_user_check((x), (ptr), sizeof(*(ptr)))
173
174 /*
175  * get_user: - Get a simple variable from user space.
176  * @x:   Variable to store result.
177  * @ptr: Source address, in user space.
178  *
179  * Context: User context only. This function may sleep if pagefaults are
180  *          enabled.
181  *
182  * This macro copies a single simple variable from user space to kernel
183  * space.  It supports simple types like char and int, but not larger
184  * data types like structures or arrays.
185  *
186  * @ptr must have pointer-to-simple-variable type, and the result of
187  * dereferencing @ptr must be assignable to @x without a cast.
188  *
189  * Returns zero on success, or -EFAULT on error.
190  * On error, the variable @x is set to zero.
191  */
192 #define get_user(x,ptr) \
193         __get_user_check((x), (ptr), sizeof(*(ptr)))
194
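/*
 * Example (illustrative only, demo_ioctl_adjust() is hypothetical): the
 * checked accessors are used directly on a single user value, e.g. in an
 * ioctl handler:
 *
 *	static long demo_ioctl_adjust(int __user *argp, int delta)
 *	{
 *		int tmp;
 *
 *		if (get_user(tmp, argp))
 *			return -EFAULT;
 *		if (put_user(tmp + delta, argp))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * Both macros perform the access_ok() check themselves, so no separate
 * validation of argp is needed.
 */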
195 /*
196  * __put_user: - Write a simple value into user space, with less checking.
197  * @x:   Value to copy to user space.
198  * @ptr: Destination address, in user space.
199  *
200  * Context: User context only. This function may sleep if pagefaults are
201  *          enabled.
202  *
203  * This macro copies a single simple value from kernel space to user
204  * space.  It supports simple types like char and int, but not larger
205  * data types like structures or arrays.
206  *
207  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
208  * to the result of dereferencing @ptr.
209  *
210  * Caller must check the pointer with access_ok() before calling this
211  * function.
212  *
213  * Returns zero on success, or -EFAULT on error.
214  */
215 #define __put_user(x,ptr) \
216         __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
217
218 /*
219  * __get_user: - Get a simple variable from user space, with less checking.
220  * @x:   Variable to store result.
221  * @ptr: Source address, in user space.
222  *
223  * Context: User context only. This function may sleep if pagefaults are
224  *          enabled.
225  *
226  * This macro copies a single simple variable from user space to kernel
227  * space.  It supports simple types like char and int, but not larger
228  * data types like structures or arrays.
229  *
230  * @ptr must have pointer-to-simple-variable type, and the result of
231  * dereferencing @ptr must be assignable to @x without a cast.
232  *
233  * Caller must check the pointer with access_ok() before calling this
234  * function.
235  *
236  * Returns zero on success, or -EFAULT on error.
237  * On error, the variable @x is set to zero.
238  */
239 #define __get_user(x,ptr) \
240         __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
241
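/*
 * Example (illustrative only, the struct and function are hypothetical):
 * the unchecked accessors pay off when a single access_ok() call can
 * cover several accesses to the same user structure:
 *
 *	struct demo_req { int op; int arg; };
 *
 *	static int demo_read_req(struct demo_req __user *ureq,
 *				 int *op, int *arg)
 *	{
 *		if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
 *			return -EFAULT;
 *		if (__get_user(*op, &ureq->op))
 *			return -EFAULT;
 *		if (__get_user(*arg, &ureq->arg))
 *			return -EFAULT;
 *		return 0;
 *	}
 */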
242 struct __large_struct { unsigned long buf[100]; };
243 #define __m(x) (*(struct __large_struct __user *)(x))
244
245 /*
246  * Yuck.  We need two variants, one for 64bit operation and one
247  * for 32 bit mode and old iron.
248  */
249 #ifndef CONFIG_EVA
250 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
251 #else
252 /*
253  * Kernel specific functions for EVA. We need to use normal load instructions
254  * to read data from kernel when operating in EVA mode. We use these macros to
255  * avoid redefining __get_user_asm for EVA.
256  */
257 #undef _loadd
258 #undef _loadw
259 #undef _loadh
260 #undef _loadb
261 #ifdef CONFIG_32BIT
262 #define _loadd                  _loadw
263 #else
264 #define _loadd(reg, addr)       "ld " reg ", " addr
265 #endif
266 #define _loadw(reg, addr)       "lw " reg ", " addr
267 #define _loadh(reg, addr)       "lh " reg ", " addr
268 #define _loadb(reg, addr)       "lb " reg ", " addr
269
270 #define __get_kernel_common(val, size, ptr)                             \
271 do {                                                                    \
272         switch (size) {                                                 \
273         case 1: __get_data_asm(val, _loadb, ptr); break;                \
274         case 2: __get_data_asm(val, _loadh, ptr); break;                \
275         case 4: __get_data_asm(val, _loadw, ptr); break;                \
276         case 8: __GET_DW(val, _loadd, ptr); break;                      \
277         default: __get_user_unknown(); break;                           \
278         }                                                               \
279 } while (0)
280 #endif
281
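/*
 * Sketch of the difference (illustrative only): with EVA enabled,
 * asm-eva.h makes the user_l* macros emit the EVA load instructions, so
 * for a byte read the kernel and user switch statements expand roughly to
 *
 *	__get_data_asm(val, _loadb, ptr)   ->  "lb  $reg, addr"
 *	__get_data_asm(val, user_lb, ptr)  ->  "lbe $reg, addr"
 *
 * Only the mnemonic differs, which is why the same __get_data_asm body
 * can serve both cases.
 */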
282 #ifdef CONFIG_32BIT
283 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
284 #endif
285 #ifdef CONFIG_64BIT
286 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
287 #endif
288
289 extern void __get_user_unknown(void);
290
291 #define __get_user_common(val, size, ptr)                               \
292 do {                                                                    \
293         switch (size) {                                                 \
294         case 1: __get_data_asm(val, user_lb, ptr); break;               \
295         case 2: __get_data_asm(val, user_lh, ptr); break;               \
296         case 4: __get_data_asm(val, user_lw, ptr); break;               \
297         case 8: __GET_DW(val, user_ld, ptr); break;                     \
298         default: __get_user_unknown(); break;                           \
299         }                                                               \
300 } while (0)
301
302 #define __get_user_nocheck(x, ptr, size)                                \
303 ({                                                                      \
304         int __gu_err;                                                   \
305                                                                         \
306         if (eva_kernel_access()) {                                      \
307                 __get_kernel_common((x), size, ptr);                    \
308         } else {                                                        \
309                 __chk_user_ptr(ptr);                                    \
310                 __get_user_common((x), size, ptr);                      \
311         }                                                               \
312         __gu_err;                                                       \
313 })
314
315 #define __get_user_check(x, ptr, size)                                  \
316 ({                                                                      \
317         int __gu_err = -EFAULT;                                         \
318         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
319                                                                         \
320         might_fault();                                                  \
321         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
322                 if (eva_kernel_access())                                \
323                         __get_kernel_common((x), size, __gu_ptr);       \
324                 else                                                    \
325                         __get_user_common((x), size, __gu_ptr);         \
326         } else                                                          \
327                 (x) = 0;                                                \
328                                                                         \
329         __gu_err;                                                       \
330 })
331
332 #define __get_data_asm(val, insn, addr)                                 \
333 {                                                                       \
334         long __gu_tmp;                                                  \
335                                                                         \
336         __asm__ __volatile__(                                           \
337         "1:     "insn("%1", "%3")"                              \n"     \
338         "2:                                                     \n"     \
339         "       .insn                                           \n"     \
340         "       .section .fixup,\"ax\"                          \n"     \
341         "3:     li      %0, %4                                  \n"     \
342         "       move    %1, $0                                  \n"     \
343         "       j       2b                                      \n"     \
344         "       .previous                                       \n"     \
345         "       .section __ex_table,\"a\"                       \n"     \
346         "       "__UA_ADDR "\t1b, 3b                            \n"     \
347         "       .previous                                       \n"     \
348         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
349         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
350                                                                         \
351         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
352 }
353
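/*
 * Descriptive note on the fixup above: the load at label 1: is the only
 * instruction that can fault.  Its address is recorded in __ex_table
 * together with the fixup address 3:, so on a fault the exception handler
 * looks the faulting PC up in the table and resumes at 3:, which loads
 * -EFAULT into the error register, zeroes the destination and jumps back
 * to 2:.  On success the .fixup code is never executed and the error
 * output keeps the 0 seeded by the "0" (0) constraint.
 */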
354 /*
355  * Get a long long 64 using 32 bit registers.
356  */
357 #define __get_data_asm_ll32(val, insn, addr)                            \
358 {                                                                       \
359         union {                                                         \
360                 unsigned long long      l;                              \
361                 __typeof__(*(addr))     t;                              \
362         } __gu_tmp;                                                     \
363                                                                         \
364         __asm__ __volatile__(                                           \
365         "1:     " insn("%1", "(%3)")"                           \n"     \
366         "2:     " insn("%D1", "4(%3)")"                         \n"     \
367         "3:                                                     \n"     \
368         "       .insn                                           \n"     \
369         "       .section        .fixup,\"ax\"                   \n"     \
370         "4:     li      %0, %4                                  \n"     \
371         "       move    %1, $0                                  \n"     \
372         "       move    %D1, $0                                 \n"     \
373         "       j       3b                                      \n"     \
374         "       .previous                                       \n"     \
375         "       .section        __ex_table,\"a\"                \n"     \
376         "       " __UA_ADDR "   1b, 4b                          \n"     \
377         "       " __UA_ADDR "   2b, 4b                          \n"     \
378         "       .previous                                       \n"     \
379         : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
380         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
381                                                                         \
382         (val) = __gu_tmp.t;                                             \
383 }
384
385 #ifndef CONFIG_EVA
386 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
387 #else
388 /*
389  * Kernel specific functions for EVA. We need to use normal store instructions
390  * to write data to kernel when operating in EVA mode. We use these macros to
391  * avoid redefining __put_data_asm for EVA.
392  */
393 #undef _stored
394 #undef _storew
395 #undef _storeh
396 #undef _storeb
397 #ifdef CONFIG_32BIT
398 #define _stored                 _storew
399 #else
400 #define _stored(reg, addr)      "sd " reg ", " addr
401 #endif
402
403 #define _storew(reg, addr)      "sw " reg ", " addr
404 #define _storeh(reg, addr)      "sh " reg ", " addr
405 #define _storeb(reg, addr)      "sb " reg ", " addr
406
407 #define __put_kernel_common(ptr, size)                                  \
408 do {                                                                    \
409         switch (size) {                                                 \
410         case 1: __put_data_asm(_storeb, ptr); break;                    \
411         case 2: __put_data_asm(_storeh, ptr); break;                    \
412         case 4: __put_data_asm(_storew, ptr); break;                    \
413         case 8: __PUT_DW(_stored, ptr); break;                          \
414         default: __put_user_unknown(); break;                           \
415         }                                                               \
416 } while (0)
417 #endif
418
419 /*
420  * Yuck.  We need two variants, one for 64bit operation and one
421  * for 32 bit mode and old iron.
422  */
423 #ifdef CONFIG_32BIT
424 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
425 #endif
426 #ifdef CONFIG_64BIT
427 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
428 #endif
429
430 #define __put_user_common(ptr, size)                                    \
431 do {                                                                    \
432         switch (size) {                                                 \
433         case 1: __put_data_asm(user_sb, ptr); break;                    \
434         case 2: __put_data_asm(user_sh, ptr); break;                    \
435         case 4: __put_data_asm(user_sw, ptr); break;                    \
436         case 8: __PUT_DW(user_sd, ptr); break;                          \
437         default: __put_user_unknown(); break;                           \
438         }                                                               \
439 } while (0)
440
441 #define __put_user_nocheck(x, ptr, size)                                \
442 ({                                                                      \
443         __typeof__(*(ptr)) __pu_val;                                    \
444         int __pu_err = 0;                                               \
445                                                                         \
446         __pu_val = (x);                                                 \
447         if (eva_kernel_access()) {                                      \
448                 __put_kernel_common(ptr, size);                         \
449         } else {                                                        \
450                 __chk_user_ptr(ptr);                                    \
451                 __put_user_common(ptr, size);                           \
452         }                                                               \
453         __pu_err;                                                       \
454 })
455
456 #define __put_user_check(x, ptr, size)                                  \
457 ({                                                                      \
458         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
459         __typeof__(*(ptr)) __pu_val = (x);                              \
460         int __pu_err = -EFAULT;                                         \
461                                                                         \
462         might_fault();                                                  \
463         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
464                 if (eva_kernel_access())                                \
465                         __put_kernel_common(__pu_addr, size);           \
466                 else                                                    \
467                         __put_user_common(__pu_addr, size);             \
468         }                                                               \
469                                                                         \
470         __pu_err;                                                       \
471 })
472
473 #define __put_data_asm(insn, ptr)                                       \
474 {                                                                       \
475         __asm__ __volatile__(                                           \
476         "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
477         "2:                                                     \n"     \
478         "       .insn                                           \n"     \
479         "       .section        .fixup,\"ax\"                   \n"     \
480         "3:     li      %0, %4                                  \n"     \
481         "       j       2b                                      \n"     \
482         "       .previous                                       \n"     \
483         "       .section        __ex_table,\"a\"                \n"     \
484         "       " __UA_ADDR "   1b, 3b                          \n"     \
485         "       .previous                                       \n"     \
486         : "=r" (__pu_err)                                               \
487         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
488           "i" (-EFAULT));                                               \
489 }
490
491 #define __put_data_asm_ll32(insn, ptr)                                  \
492 {                                                                       \
493         __asm__ __volatile__(                                           \
494         "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
495         "2:     "insn("%D2", "4(%3)")"                          \n"     \
496         "3:                                                     \n"     \
497         "       .insn                                           \n"     \
498         "       .section        .fixup,\"ax\"                   \n"     \
499         "4:     li      %0, %4                                  \n"     \
500         "       j       3b                                      \n"     \
501         "       .previous                                       \n"     \
502         "       .section        __ex_table,\"a\"                \n"     \
503         "       " __UA_ADDR "   1b, 4b                          \n"     \
504         "       " __UA_ADDR "   2b, 4b                          \n"     \
505         "       .previous"                                              \
506         : "=r" (__pu_err)                                               \
507         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
508           "i" (-EFAULT));                                               \
509 }
510
511 extern void __put_user_unknown(void);
512
513 /*
514  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
515  * EVA unaligned access is handled in the ADE exception handler.
516  */
517 #ifndef CONFIG_EVA
518 /*
519  * put_user_unaligned: - Write a simple value into user space.
520  * @x:   Value to copy to user space.
521  * @ptr: Destination address, in user space.
522  *
523  * Context: User context only. This function may sleep if pagefaults are
524  *          enabled.
525  *
526  * This macro copies a single simple value from kernel space to user
527  * space.  It supports simple types like char and int, but not larger
528  * data types like structures or arrays.
529  *
530  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
531  * to the result of dereferencing @ptr.
532  *
533  * Returns zero on success, or -EFAULT on error.
534  */
535 #define put_user_unaligned(x,ptr)       \
536         __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
537
538 /*
539  * get_user_unaligned: - Get a simple variable from user space.
540  * @x:   Variable to store result.
541  * @ptr: Source address, in user space.
542  *
543  * Context: User context only. This function may sleep if pagefaults are
544  *          enabled.
545  *
546  * This macro copies a single simple variable from user space to kernel
547  * space.  It supports simple types like char and int, but not larger
548  * data types like structures or arrays.
549  *
550  * @ptr must have pointer-to-simple-variable type, and the result of
551  * dereferencing @ptr must be assignable to @x without a cast.
552  *
553  * Returns zero on success, or -EFAULT on error.
554  * On error, the variable @x is set to zero.
555  */
556 #define get_user_unaligned(x,ptr) \
557         __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
558
559 /*
560  * __put_user_unaligned: - Write a simple value into user space, with less checking.
561  * @x:   Value to copy to user space.
562  * @ptr: Destination address, in user space.
563  *
564  * Context: User context only. This function may sleep if pagefaults are
565  *          enabled.
566  *
567  * This macro copies a single simple value from kernel space to user
568  * space.  It supports simple types like char and int, but not larger
569  * data types like structures or arrays.
570  *
571  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
572  * to the result of dereferencing @ptr.
573  *
574  * Caller must check the pointer with access_ok() before calling this
575  * function.
576  *
577  * Returns zero on success, or -EFAULT on error.
578  */
579 #define __put_user_unaligned(x,ptr) \
580         __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
581
582 /*
583  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
584  * @x:   Variable to store result.
585  * @ptr: Source address, in user space.
586  *
587  * Context: User context only. This function may sleep if pagefaults are
588  *          enabled.
589  *
590  * This macro copies a single simple variable from user space to kernel
591  * space.  It supports simple types like char and int, but not larger
592  * data types like structures or arrays.
593  *
594  * @ptr must have pointer-to-simple-variable type, and the result of
595  * dereferencing @ptr must be assignable to @x without a cast.
596  *
597  * Caller must check the pointer with access_ok() before calling this
598  * function.
599  *
600  * Returns zero on success, or -EFAULT on error.
601  * On error, the variable @x is set to zero.
602  */
603 #define __get_user_unaligned(x,ptr) \
604         __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
605
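/*
 * Example (illustrative only, the packed struct is hypothetical): the
 * unaligned variants are for fields that may not be naturally aligned in
 * user memory, e.g. a record parsed out of a byte stream:
 *
 *	struct demo_rec { u8 tag; u32 value; } __packed;
 *
 *	static int demo_get_value(struct demo_rec __user *rec, u32 *out)
 *	{
 *		return get_user_unaligned(*out, &rec->value);
 *	}
 *
 * Here the 32-bit load expands to the assembler's ulw macro-instruction
 * (an lwl/lwr pair) instead of a single lw.
 */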
606 /*
607  * Yuck.  We need two variants, one for 64bit operation and one
608  * for 32 bit mode and old iron.
609  */
610 #ifdef CONFIG_32BIT
611 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
612         __get_user_unaligned_asm_ll32(val, ptr)
613 #endif
614 #ifdef CONFIG_64BIT
615 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
616         __get_user_unaligned_asm(val, "uld", ptr)
617 #endif
618
619 extern void __get_user_unaligned_unknown(void);
620
621 #define __get_user_unaligned_common(val, size, ptr)                     \
622 do {                                                                    \
623         switch (size) {                                                 \
624         case 1: __get_data_asm(val, "lb", ptr); break;                  \
625         case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;       \
626         case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;       \
627         case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
628         default: __get_user_unaligned_unknown(); break;                 \
629         }                                                               \
630 } while (0)
631
632 #define __get_user_unaligned_nocheck(x,ptr,size)                        \
633 ({                                                                      \
634         int __gu_err;                                                   \
635                                                                         \
636         __get_user_unaligned_common((x), size, ptr);                    \
637         __gu_err;                                                       \
638 })
639
640 #define __get_user_unaligned_check(x,ptr,size)                          \
641 ({                                                                      \
642         int __gu_err = -EFAULT;                                         \
643         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
644                                                                         \
645         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
646                 __get_user_unaligned_common((x), size, __gu_ptr);       \
647                                                                         \
648         __gu_err;                                                       \
649 })
650
651 #define __get_data_unaligned_asm(val, insn, addr)                       \
652 {                                                                       \
653         long __gu_tmp;                                                  \
654                                                                         \
655         __asm__ __volatile__(                                           \
656         "1:     " insn "        %1, %3                          \n"     \
657         "2:                                                     \n"     \
658         "       .insn                                           \n"     \
659         "       .section .fixup,\"ax\"                          \n"     \
660         "3:     li      %0, %4                                  \n"     \
661         "       move    %1, $0                                  \n"     \
662         "       j       2b                                      \n"     \
663         "       .previous                                       \n"     \
664         "       .section __ex_table,\"a\"                       \n"     \
665         "       "__UA_ADDR "\t1b, 3b                            \n"     \
666         "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
667         "       .previous                                       \n"     \
668         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
669         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
670                                                                         \
671         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
672 }
673
674 /*
675  * Get a long long 64 using 32 bit registers.
676  */
677 #define __get_user_unaligned_asm_ll32(val, addr)                        \
678 {                                                                       \
679         unsigned long long __gu_tmp;                                    \
680                                                                         \
681         __asm__ __volatile__(                                           \
682         "1:     ulw     %1, (%3)                                \n"     \
683         "2:     ulw     %D1, 4(%3)                              \n"     \
684         "       move    %0, $0                                  \n"     \
685         "3:                                                     \n"     \
686         "       .insn                                           \n"     \
687         "       .section        .fixup,\"ax\"                   \n"     \
688         "4:     li      %0, %4                                  \n"     \
689         "       move    %1, $0                                  \n"     \
690         "       move    %D1, $0                                 \n"     \
691         "       j       3b                                      \n"     \
692         "       .previous                                       \n"     \
693         "       .section        __ex_table,\"a\"                \n"     \
694         "       " __UA_ADDR "   1b, 4b                          \n"     \
695         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
696         "       " __UA_ADDR "   2b, 4b                          \n"     \
697         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
698         "       .previous                                       \n"     \
699         : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
700         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
701         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
702 }
703
704 /*
705  * Yuck.  We need two variants, one for 64bit operation and one
706  * for 32 bit mode and old iron.
707  */
708 #ifdef CONFIG_32BIT
709 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
710 #endif
711 #ifdef CONFIG_64BIT
712 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
713 #endif
714
715 #define __put_user_unaligned_common(ptr, size)                          \
716 do {                                                                    \
717         switch (size) {                                                 \
718         case 1: __put_data_asm("sb", ptr); break;                       \
719         case 2: __put_user_unaligned_asm("ush", ptr); break;            \
720         case 4: __put_user_unaligned_asm("usw", ptr); break;            \
721         case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
722         default: __put_user_unaligned_unknown(); break;                 \
723         }                                                               \
} while (0)
724
725 #define __put_user_unaligned_nocheck(x,ptr,size)                        \
726 ({                                                                      \
727         __typeof__(*(ptr)) __pu_val;                                    \
728         int __pu_err = 0;                                               \
729                                                                         \
730         __pu_val = (x);                                                 \
731         __put_user_unaligned_common(ptr, size);                         \
732         __pu_err;                                                       \
733 })
734
735 #define __put_user_unaligned_check(x,ptr,size)                          \
736 ({                                                                      \
737         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
738         __typeof__(*(ptr)) __pu_val = (x);                              \
739         int __pu_err = -EFAULT;                                         \
740                                                                         \
741         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
742                 __put_user_unaligned_common(__pu_addr, size);           \
743                                                                         \
744         __pu_err;                                                       \
745 })
746
747 #define __put_user_unaligned_asm(insn, ptr)                             \
748 {                                                                       \
749         __asm__ __volatile__(                                           \
750         "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
751         "2:                                                     \n"     \
752         "       .insn                                           \n"     \
753         "       .section        .fixup,\"ax\"                   \n"     \
754         "3:     li      %0, %4                                  \n"     \
755         "       j       2b                                      \n"     \
756         "       .previous                                       \n"     \
757         "       .section        __ex_table,\"a\"                \n"     \
758         "       " __UA_ADDR "   1b, 3b                          \n"     \
759         "       .previous                                       \n"     \
760         : "=r" (__pu_err)                                               \
761         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
762           "i" (-EFAULT));                                               \
763 }
764
765 #define __put_user_unaligned_asm_ll32(ptr)                              \
766 {                                                                       \
767         __asm__ __volatile__(                                           \
768         "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
769         "2:     sw      %D2, 4(%3)                              \n"     \
770         "3:                                                     \n"     \
771         "       .insn                                           \n"     \
772         "       .section        .fixup,\"ax\"                   \n"     \
773         "4:     li      %0, %4                                  \n"     \
774         "       j       3b                                      \n"     \
775         "       .previous                                       \n"     \
776         "       .section        __ex_table,\"a\"                \n"     \
777         "       " __UA_ADDR "   1b, 4b                          \n"     \
778         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
779         "       " __UA_ADDR "   2b, 4b                          \n"     \
780         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
781         "       .previous"                                              \
782         : "=r" (__pu_err)                                               \
783         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
784           "i" (-EFAULT));                                               \
785 }
786
787 extern void __put_user_unaligned_unknown(void);
788 #endif
789
790 /*
791  * We're generating jumps to subroutines which may be outside the range of
792  * the jump instruction
793  */
794 #ifdef MODULE
795 #define __MODULE_JAL(destination)                                       \
796         ".set\tnoat\n\t"                                                \
797         __UA_LA "\t$1, " #destination "\n\t"                            \
798         "jalr\t$1\n\t"                                                  \
799         ".set\tat\n\t"
800 #else
801 #define __MODULE_JAL(destination)                                       \
802         "jal\t" #destination "\n\t"
803 #endif
804
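/*
 * Descriptive note: jal can only reach targets within the current 256 MB
 * segment (28 bits of the PC), while modules are loaded far away from the
 * core kernel image, hence the la/jalr sequence used for MODULE builds
 * above.
 */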
805 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
806                                               defined(CONFIG_CPU_HAS_PREFETCH))
807 #define DADDI_SCRATCH "$3"
808 #else
809 #define DADDI_SCRATCH "$0"
810 #endif
811
812 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
813
814 #ifndef CONFIG_EVA
815 #define __invoke_copy_to_user(to, from, n)                              \
816 ({                                                                      \
817         register void __user *__cu_to_r __asm__("$4");                  \
818         register const void *__cu_from_r __asm__("$5");                 \
819         register long __cu_len_r __asm__("$6");                         \
820                                                                         \
821         __cu_to_r = (to);                                               \
822         __cu_from_r = (from);                                           \
823         __cu_len_r = (n);                                               \
824         __asm__ __volatile__(                                           \
825         __MODULE_JAL(__copy_user)                                       \
826         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
827         :                                                               \
828         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
829           DADDI_SCRATCH, "memory");                                     \
830         __cu_len_r;                                                     \
831 })
832
833 #define __invoke_copy_to_kernel(to, from, n)                            \
834         __invoke_copy_to_user(to, from, n)
835
836 #endif
837
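/*
 * Descriptive note on __invoke_copy_to_user() above: __copy_user is an
 * assembler routine reached with a bare jal, so the macro itself pins the
 * arguments in the MIPS argument registers ($4/$5/$6, i.e. a0/a1/a2) and
 * lists every register the routine may clobber.  The residual length
 * comes back in $6, which is why __cu_len_r uses the "+r" (input and
 * output) constraint.
 */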
838 /*
839  * __copy_to_user: - Copy a block of data into user space, with less checking.
840  * @to:   Destination address, in user space.
841  * @from: Source address, in kernel space.
842  * @n:    Number of bytes to copy.
843  *
844  * Context: User context only. This function may sleep if pagefaults are
845  *          enabled.
846  *
847  * Copy data from kernel space to user space.  Caller must check
848  * the specified block with access_ok() before calling this function.
849  *
850  * Returns number of bytes that could not be copied.
851  * On success, this will be zero.
852  */
853 #define __copy_to_user(to, from, n)                                     \
854 ({                                                                      \
855         void __user *__cu_to;                                           \
856         const void *__cu_from;                                          \
857         long __cu_len;                                                  \
858                                                                         \
859         __cu_to = (to);                                                 \
860         __cu_from = (from);                                             \
861         __cu_len = (n);                                                 \
862                                                                         \
863         check_object_size(__cu_from, __cu_len, true);                   \
864         might_fault();                                                  \
865                                                                         \
866         if (eva_kernel_access())                                        \
867                 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
868                                                    __cu_len);           \
869         else                                                            \
870                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
871                                                  __cu_len);             \
872         __cu_len;                                                       \
873 })
874
875 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
876
877 #define __copy_to_user_inatomic(to, from, n)                            \
878 ({                                                                      \
879         void __user *__cu_to;                                           \
880         const void *__cu_from;                                          \
881         long __cu_len;                                                  \
882                                                                         \
883         __cu_to = (to);                                                 \
884         __cu_from = (from);                                             \
885         __cu_len = (n);                                                 \
886                                                                         \
887         check_object_size(__cu_from, __cu_len, true);                   \
888                                                                         \
889         if (eva_kernel_access())                                        \
890                 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
891                                                    __cu_len);           \
892         else                                                            \
893                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
894                                                  __cu_len);             \
895         __cu_len;                                                       \
896 })
897
898 #define __copy_from_user_inatomic(to, from, n)                          \
899 ({                                                                      \
900         void *__cu_to;                                                  \
901         const void __user *__cu_from;                                   \
902         long __cu_len;                                                  \
903                                                                         \
904         __cu_to = (to);                                                 \
905         __cu_from = (from);                                             \
906         __cu_len = (n);                                                 \
907                                                                         \
908         check_object_size(__cu_to, __cu_len, false);                    \
909                                                                         \
910         if (eva_kernel_access())                                        \
911                 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
912                                                               __cu_from,\
913                                                               __cu_len);\
914         else                                                            \
915                 __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
916                                                             __cu_from,  \
917                                                             __cu_len);  \
918         __cu_len;                                                       \
919 })
920
921 /*
922  * copy_to_user: - Copy a block of data into user space.
923  * @to:   Destination address, in user space.
924  * @from: Source address, in kernel space.
925  * @n:    Number of bytes to copy.
926  *
927  * Context: User context only. This function may sleep if pagefaults are
928  *          enabled.
929  *
930  * Copy data from kernel space to user space.
931  *
932  * Returns number of bytes that could not be copied.
933  * On success, this will be zero.
934  */
935 #define copy_to_user(to, from, n)                                       \
936 ({                                                                      \
937         void __user *__cu_to;                                           \
938         const void *__cu_from;                                          \
939         long __cu_len;                                                  \
940                                                                         \
941         __cu_to = (to);                                                 \
942         __cu_from = (from);                                             \
943         __cu_len = (n);                                                 \
944                                                                         \
945         check_object_size(__cu_from, __cu_len, true);                   \
946                                                                         \
947         if (eva_kernel_access()) {                                      \
948                 __cu_len = __invoke_copy_to_kernel(__cu_to,             \
949                                                    __cu_from,           \
950                                                    __cu_len);           \
951         } else {                                                        \
952                 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
953                         might_fault();                                  \
954                         __cu_len = __invoke_copy_to_user(__cu_to,       \
955                                                          __cu_from,     \
956                                                          __cu_len);     \
957                 }                                                       \
958         }                                                               \
959         __cu_len;                                                       \
960 })
961
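/*
 * Example (illustrative only, demo_read() is hypothetical): a read()
 * style handler returning how much was actually transferred:
 *
 *	static ssize_t demo_read(char __user *buf, size_t count,
 *				 const char *kbuf, size_t avail)
 *	{
 *		size_t n = min(count, avail);
 *
 *		if (copy_to_user(buf, kbuf, n))
 *			return -EFAULT;
 *		return n;
 *	}
 *
 * copy_to_user() returns the number of bytes it could not copy, so any
 * non-zero result is treated as a fault here.
 */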
962 #ifndef CONFIG_EVA
963
964 #define __invoke_copy_from_user(to, from, n)                            \
965 ({                                                                      \
966         register void *__cu_to_r __asm__("$4");                         \
967         register const void __user *__cu_from_r __asm__("$5");          \
968         register long __cu_len_r __asm__("$6");                         \
969                                                                         \
970         __cu_to_r = (to);                                               \
971         __cu_from_r = (from);                                           \
972         __cu_len_r = (n);                                               \
973         __asm__ __volatile__(                                           \
974         ".set\tnoreorder\n\t"                                           \
975         __MODULE_JAL(__copy_user)                                       \
976         ".set\tnoat\n\t"                                                \
977         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
978         ".set\tat\n\t"                                                  \
979         ".set\treorder"                                                 \
980         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
981         :                                                               \
982         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
983           DADDI_SCRATCH, "memory");                                     \
984         __cu_len_r;                                                     \
985 })
986
987 #define __invoke_copy_from_kernel(to, from, n)                          \
988         __invoke_copy_from_user(to, from, n)
989
990 /* For userland <-> userland operations */
991 #define ___invoke_copy_in_user(to, from, n)                             \
992         __invoke_copy_from_user(to, from, n)
993
994 /* For kernel <-> kernel operations */
995 #define ___invoke_copy_in_kernel(to, from, n)                           \
996         __invoke_copy_from_user(to, from, n)
997
998 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
999 ({                                                                      \
1000         register void *__cu_to_r __asm__("$4");                         \
1001         register const void __user *__cu_from_r __asm__("$5");          \
1002         register long __cu_len_r __asm__("$6");                         \
1003                                                                         \
1004         __cu_to_r = (to);                                               \
1005         __cu_from_r = (from);                                           \
1006         __cu_len_r = (n);                                               \
1007         __asm__ __volatile__(                                           \
1008         ".set\tnoreorder\n\t"                                           \
1009         __MODULE_JAL(__copy_user_inatomic)                              \
1010         ".set\tnoat\n\t"                                                \
1011         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
1012         ".set\tat\n\t"                                                  \
1013         ".set\treorder"                                                 \
1014         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1015         :                                                               \
1016         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1017           DADDI_SCRATCH, "memory");                                     \
1018         __cu_len_r;                                                     \
1019 })
1020
1021 #define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
1022         __invoke_copy_from_user_inatomic(to, from, n)
1023
1024 #else
1025
1026 /* EVA specific functions */
1027
1028 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
1029                                        size_t __n);
1030 extern size_t __copy_from_user_eva(void *__to, const void *__from,
1031                                    size_t __n);
1032 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1033                                  size_t __n);
1034 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1035
1036 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)      \
1037 ({                                                                      \
1038         register void *__cu_to_r __asm__("$4");                         \
1039         register const void __user *__cu_from_r __asm__("$5");          \
1040         register long __cu_len_r __asm__("$6");                         \
1041                                                                         \
1042         __cu_to_r = (to);                                               \
1043         __cu_from_r = (from);                                           \
1044         __cu_len_r = (n);                                               \
1045         __asm__ __volatile__(                                           \
1046         ".set\tnoreorder\n\t"                                           \
1047         __MODULE_JAL(func_ptr)                                          \
1048         ".set\tnoat\n\t"                                                \
1049         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
1050         ".set\tat\n\t"                                                  \
1051         ".set\treorder"                                                 \
1052         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1053         :                                                               \
1054         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1055           DADDI_SCRATCH, "memory");                                     \
1056         __cu_len_r;                                                     \
1057 })
1058
1059 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)        \
1060 ({                                                                      \
1061         register void *__cu_to_r __asm__("$4");                         \
1062         register const void __user *__cu_from_r __asm__("$5");          \
1063         register long __cu_len_r __asm__("$6");                         \
1064                                                                         \
1065         __cu_to_r = (to);                                               \
1066         __cu_from_r = (from);                                           \
1067         __cu_len_r = (n);                                               \
1068         __asm__ __volatile__(                                           \
1069         __MODULE_JAL(func_ptr)                                          \
1070         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1071         :                                                               \
1072         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1073           DADDI_SCRATCH, "memory");                                     \
1074         __cu_len_r;                                                     \
1075 })
1076
1077 /*
1078  * Source or destination address is in userland. We need to go through
1079  * the TLB.
1080  */
1081 #define __invoke_copy_from_user(to, from, n)                            \
1082         __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1083
1084 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
1085         __invoke_copy_from_user_eva_generic(to, from, n,                \
1086                                             __copy_user_inatomic_eva)
1087
1088 #define __invoke_copy_to_user(to, from, n)                              \
1089         __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1090
1091 #define ___invoke_copy_in_user(to, from, n)                             \
1092         __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1093
1094 /*
1095  * Source or destination address is in the kernel. We do not go through
1096  * the TLB.
1097  */
1098 #define __invoke_copy_from_kernel(to, from, n)                          \
1099         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1100
1101 #define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
1102         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1103
1104 #define __invoke_copy_to_kernel(to, from, n)                            \
1105         __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1106
1107 #define ___invoke_copy_in_kernel(to, from, n)                           \
1108         __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1109
1110 #endif /* CONFIG_EVA */
1111
1112 /*
1113  * __copy_from_user: - Copy a block of data from user space, with less checking.
1114  * @to:   Destination address, in kernel space.
1115  * @from: Source address, in user space.
1116  * @n:    Number of bytes to copy.
1117  *
1118  * Context: User context only. This function may sleep if pagefaults are
1119  *          enabled.
1120  *
1121  * Copy data from user space to kernel space.  Caller must check
1122  * the specified block with access_ok() before calling this function.
1123  *
1124  * Returns number of bytes that could not be copied.
1125  * On success, this will be zero.
1126  *
1127  * If some data could not be copied, this function will pad the copied
1128  * data to the requested size using zero bytes.
1129  */
1130 #define __copy_from_user(to, from, n)                                   \
1131 ({                                                                      \
1132         void *__cu_to;                                                  \
1133         const void __user *__cu_from;                                   \
1134         long __cu_len;                                                  \
1135                                                                         \
1136         __cu_to = (to);                                                 \
1137         __cu_from = (from);                                             \
1138         __cu_len = (n);                                                 \
1139                                                                         \
1140         check_object_size(__cu_to, __cu_len, false);                    \
1141                                                                         \
1142         if (eva_kernel_access()) {                                      \
1143                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1144                                                      __cu_from,         \
1145                                                      __cu_len);         \
1146         } else {                                                        \
1147                 might_fault();                                          \
1148                 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
1149                                                    __cu_len);           \
1150         }                                                               \
1151         __cu_len;                                                       \
1152 })
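/*
 * Usage sketch (illustrative only; function and struct names are hypothetical):
 * __copy_from_user() skips the access_ok() check, so the caller must perform
 * it before invoking the copy.
 *
 *	static int example_fetch_args(struct example_args *kbuf,
 *				      const void __user *ubuf)
 *	{
 *		if (!access_ok(VERIFY_READ, ubuf, sizeof(*kbuf)))
 *			return -EFAULT;
 *		if (__copy_from_user(kbuf, ubuf, sizeof(*kbuf)))
 *			return -EFAULT;		// nonzero: bytes left uncopied
 *		return 0;
 *	}
 */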
1153
1154 /*
1155  * copy_from_user: - Copy a block of data from user space.
1156  * @to:   Destination address, in kernel space.
1157  * @from: Source address, in user space.
1158  * @n:    Number of bytes to copy.
1159  *
1160  * Context: User context only. This function may sleep if pagefaults are
1161  *          enabled.
1162  *
1163  * Copy data from user space to kernel space.
1164  *
1165  * Returns number of bytes that could not be copied.
1166  * On success, this will be zero.
1167  *
1168  * If some data could not be copied, this function will pad the copied
1169  * data to the requested size using zero bytes.
1170  */
1171 #define copy_from_user(to, from, n)                                     \
1172 ({                                                                      \
1173         void *__cu_to;                                                  \
1174         const void __user *__cu_from;                                   \
1175         long __cu_len;                                                  \
1176                                                                         \
1177         __cu_to = (to);                                                 \
1178         __cu_from = (from);                                             \
1179         __cu_len = (n);                                                 \
1180                                                                         \
1181         check_object_size(__cu_to, __cu_len, false);                    \
1182                                                                         \
1183         if (eva_kernel_access()) {                                      \
1184                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1185                                                      __cu_from,         \
1186                                                      __cu_len);         \
1187         } else {                                                        \
1188                 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {      \
1189                         might_fault();                                  \
1190                         __cu_len = __invoke_copy_from_user(__cu_to,     \
1191                                                            __cu_from,   \
1192                                                            __cu_len);   \
1193                 } else {                                                \
1194                         memset(__cu_to, 0, __cu_len);                   \
1195                 }                                                       \
1196         }                                                               \
1197         __cu_len;                                                       \
1198 })
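/*
 * Usage sketch (illustrative only; names are hypothetical): copy_from_user()
 * performs the access_ok() check itself and zero-fills the destination on a
 * failed or partial copy, so a nonzero return maps directly to -EFAULT.
 *
 *	struct example_params { int enable; int value; };
 *
 *	static long example_set_params(struct example_params *p,
 *				       const void __user *argp)
 *	{
 *		if (copy_from_user(p, argp, sizeof(*p)))
 *			return -EFAULT;		// some bytes were not copied
 *		return 0;
 *	}
 */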
1199
1200 #define __copy_in_user(to, from, n)                                     \
1201 ({                                                                      \
1202         void __user *__cu_to;                                           \
1203         const void __user *__cu_from;                                   \
1204         long __cu_len;                                                  \
1205                                                                         \
1206         __cu_to = (to);                                                 \
1207         __cu_from = (from);                                             \
1208         __cu_len = (n);                                                 \
1209         if (eva_kernel_access()) {                                      \
1210                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1211                                                     __cu_len);          \
1212         } else {                                                        \
1213                 might_fault();                                          \
1214                 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,   \
1215                                                   __cu_len);            \
1216         }                                                               \
1217         __cu_len;                                                       \
1218 })

1219
1220 #define copy_in_user(to, from, n)                                       \
1221 ({                                                                      \
1222         void __user *__cu_to;                                           \
1223         const void __user *__cu_from;                                   \
1224         long __cu_len;                                                  \
1225                                                                         \
1226         __cu_to = (to);                                                 \
1227         __cu_from = (from);                                             \
1228         __cu_len = (n);                                                 \
1229         if (eva_kernel_access()) {                                      \
1230                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1231                                                     __cu_len);          \
1232         } else {                                                        \
1233                 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
1234                            access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
1235                         might_fault();                                  \
1236                         __cu_len = ___invoke_copy_in_user(__cu_to,      \
1237                                                           __cu_from,    \
1238                                                           __cu_len);    \
1239                 }                                                       \
1240         }                                                               \
1241         __cu_len;                                                       \
1242 })
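/*
 * Usage sketch (illustrative only; name is hypothetical): copy_in_user()
 * copies between two userspace buffers and checks both ranges with
 * access_ok(); a nonzero return means not all bytes were copied.
 *
 *	static int example_dup_ubuf(void __user *udst,
 *				    const void __user *usrc, size_t len)
 *	{
 *		if (copy_in_user(udst, usrc, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */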
1243
1244 /*
1245  * __clear_user: - Zero a block of memory in user space, with less checking.
1246  * @to:   Destination address, in user space.
1247  * @n:    Number of bytes to zero.
1248  *
1249  * Zero a block of memory in user space.  Caller must check
1250  * the specified block with access_ok() before calling this function.
1251  *
1252  * Returns number of bytes that could not be cleared.
1253  * On success, this will be zero.
1254  */
1255 static inline __kernel_size_t
1256 __clear_user(void __user *addr, __kernel_size_t size)
1257 {
1258         __kernel_size_t res;
1259
1260         if (eva_kernel_access()) {
1261                 __asm__ __volatile__(
1262                         "move\t$4, %1\n\t"
1263                         "move\t$5, $0\n\t"
1264                         "move\t$6, %2\n\t"
1265                         __MODULE_JAL(__bzero_kernel)
1266                         "move\t%0, $6"
1267                         : "=r" (res)
1268                         : "r" (addr), "r" (size)
1269                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1270         } else {
1271                 might_fault();
1272                 __asm__ __volatile__(
1273                         "move\t$4, %1\n\t"
1274                         "move\t$5, $0\n\t"
1275                         "move\t$6, %2\n\t"
1276                         __MODULE_JAL(__bzero)
1277                         "move\t%0, $6"
1278                         : "=r" (res)
1279                         : "r" (addr), "r" (size)
1280                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1281         }
1282
1283         return res;
1284 }
1285
1286 #define clear_user(addr,n)                                              \
1287 ({                                                                      \
1288         void __user * __cl_addr = (addr);                               \
1289         unsigned long __cl_size = (n);                                  \
1290         if (__cl_size && access_ok(VERIFY_WRITE,                        \
1291                                         __cl_addr, __cl_size))          \
1292                 __cl_size = __clear_user(__cl_addr, __cl_size);         \
1293         __cl_size;                                                      \
1294 })
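/*
 * Usage sketch (illustrative only; name is hypothetical): clear_user()
 * checks the range with access_ok() and returns the number of bytes that
 * could not be zeroed, e.g. when padding the unused tail of a user buffer.
 *
 *	static int example_zero_tail(char __user *ubuf, size_t used,
 *				     size_t total)
 *	{
 *		if (used < total && clear_user(ubuf + used, total - used))
 *			return -EFAULT;
 *		return 0;
 *	}
 */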
1295
1296 /*
1297  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
1298  * @dst:   Destination address, in kernel space.  This buffer must be at
1299  *         least @count bytes long.
1300  * @src:   Source address, in user space.
1301  * @count: Maximum number of bytes to copy, including the trailing NUL.
1302  *
1303  * Copies a NUL-terminated string from userspace to kernel space.
1304  * Caller must check the specified block with access_ok() before calling
1305  * this function.
1306  *
1307  * On success, returns the length of the string (not including the trailing
1308  * NUL).
1309  *
1310  * If access to userspace fails, returns -EFAULT (some data may have been
1311  * copied).
1312  *
1313  * If @count is smaller than the length of the string, copies @count bytes
1314  * and returns @count.
1315  */
1316 static inline long
1317 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1318 {
1319         long res;
1320
1321         if (eva_kernel_access()) {
1322                 __asm__ __volatile__(
1323                         "move\t$4, %1\n\t"
1324                         "move\t$5, %2\n\t"
1325                         "move\t$6, %3\n\t"
1326                         __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1327                         "move\t%0, $2"
1328                         : "=r" (res)
1329                         : "r" (__to), "r" (__from), "r" (__len)
1330                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1331         } else {
1332                 might_fault();
1333                 __asm__ __volatile__(
1334                         "move\t$4, %1\n\t"
1335                         "move\t$5, %2\n\t"
1336                         "move\t$6, %3\n\t"
1337                         __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1338                         "move\t%0, $2"
1339                         : "=r" (res)
1340                         : "r" (__to), "r" (__from), "r" (__len)
1341                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1342         }
1343
1344         return res;
1345 }
1346
1347 /*
1348  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1349  * @dst:   Destination address, in kernel space.  This buffer must be at
1350  *         least @count bytes long.
1351  * @src:   Source address, in user space.
1352  * @count: Maximum number of bytes to copy, including the trailing NUL.
1353  *
1354  * Copies a NUL-terminated string from userspace to kernel space.
1355  *
1356  * On success, returns the length of the string (not including the trailing
1357  * NUL).
1358  *
1359  * If access to userspace fails, returns -EFAULT (some data may have been
1360  * copied).
1361  *
1362  * If @count is smaller than the length of the string, copies @count bytes
1363  * and returns @count.
1364  */
1365 static inline long
1366 strncpy_from_user(char *__to, const char __user *__from, long __len)
1367 {
1368         long res;
1369
1370         if (eva_kernel_access()) {
1371                 __asm__ __volatile__(
1372                         "move\t$4, %1\n\t"
1373                         "move\t$5, %2\n\t"
1374                         "move\t$6, %3\n\t"
1375                         __MODULE_JAL(__strncpy_from_kernel_asm)
1376                         "move\t%0, $2"
1377                         : "=r" (res)
1378                         : "r" (__to), "r" (__from), "r" (__len)
1379                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1380         } else {
1381                 might_fault();
1382                 __asm__ __volatile__(
1383                         "move\t$4, %1\n\t"
1384                         "move\t$5, %2\n\t"
1385                         "move\t$6, %3\n\t"
1386                         __MODULE_JAL(__strncpy_from_user_asm)
1387                         "move\t%0, $2"
1388                         : "=r" (res)
1389                         : "r" (__to), "r" (__from), "r" (__len)
1390                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1391         }
1392
1393         return res;
1394 }
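/*
 * Usage sketch (illustrative only; names are hypothetical): a return value
 * equal to the buffer size means the string was truncated and may not be
 * NUL-terminated; a negative return indicates a fault in userspace.
 *
 *	static int example_get_name(char *name, size_t size,
 *				    const char __user *uname)
 *	{
 *		long len = strncpy_from_user(name, uname, size);
 *
 *		if (len < 0)
 *			return -EFAULT;		// fault while copying
 *		if (len == size)
 *			return -ENAMETOOLONG;	// no NUL within size bytes
 *		return 0;
 *	}
 */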
1395
1396 /*
1397  * strlen_user: - Get the size of a string in user space.
1398  * @str: The string to measure.
1399  *
1400  * Context: User context only. This function may sleep if pagefaults are
1401  *          enabled.
1402  *
1403  * Get the size of a NUL-terminated string in user space.
1404  *
1405  * Returns the size of the string INCLUDING the terminating NUL.
1406  * On exception, returns 0.
1407  *
1408  * If there is a limit on the length of a valid string, you may wish to
1409  * consider using strnlen_user() instead.
1410  */
1411 static inline long strlen_user(const char __user *s)
1412 {
1413         long res;
1414
1415         if (eva_kernel_access()) {
1416                 __asm__ __volatile__(
1417                         "move\t$4, %1\n\t"
1418                         __MODULE_JAL(__strlen_kernel_asm)
1419                         "move\t%0, $2"
1420                         : "=r" (res)
1421                         : "r" (s)
1422                         : "$2", "$4", __UA_t0, "$31");
1423         } else {
1424                 might_fault();
1425                 __asm__ __volatile__(
1426                         "move\t$4, %1\n\t"
1427                         __MODULE_JAL(__strlen_user_asm)
1428                         "move\t%0, $2"
1429                         : "=r" (res)
1430                         : "r" (s)
1431                         : "$2", "$4", __UA_t0, "$31");
1432         }
1433
1434         return res;
1435 }
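/*
 * Usage sketch (illustrative only; name is hypothetical): strlen_user()
 * returns the string length including the trailing NUL, or 0 on a fault,
 * so a conventional "length without NUL" needs the result decremented.
 *
 *	static long example_user_strlen(const char __user *ustr)
 *	{
 *		long len = strlen_user(ustr);
 *
 *		if (!len)
 *			return -EFAULT;		// faulted while reading
 *		return len - 1;			// length excluding the NUL
 *	}
 */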
1436
1437 /* Returns: 0 on fault, otherwise string length + 1 (storage size including the NUL) */
1438 static inline long __strnlen_user(const char __user *s, long n)
1439 {
1440         long res;
1441
1442         if (eva_kernel_access()) {
1443                 __asm__ __volatile__(
1444                         "move\t$4, %1\n\t"
1445                         "move\t$5, %2\n\t"
1446                         __MODULE_JAL(__strnlen_kernel_nocheck_asm)
1447                         "move\t%0, $2"
1448                         : "=r" (res)
1449                         : "r" (s), "r" (n)
1450                         : "$2", "$4", "$5", __UA_t0, "$31");
1451         } else {
1452                 might_fault();
1453                 __asm__ __volatile__(
1454                         "move\t$4, %1\n\t"
1455                         "move\t$5, %2\n\t"
1456                         __MODULE_JAL(__strnlen_user_nocheck_asm)
1457                         "move\t%0, $2"
1458                         : "=r" (res)
1459                         : "r" (s), "r" (n)
1460                         : "$2", "$4", "$5", __UA_t0, "$31");
1461         }
1462
1463         return res;
1464 }
1465
1466 /*
1467  * strnlen_user: - Get the size of a string in user space.
1468  * @str: The string to measure.
1469  *
1470  * Context: User context only. This function may sleep if pagefaults are
1471  *          enabled.
1472  *
1473  * Get the size of a NUL-terminated string in user space.
1474  *
1475  * Returns the size of the string INCLUDING the terminating NUL.
1476  * On exception, returns 0.
1477  * If the string is too long, returns a value greater than @n.
1478  */
1479 static inline long strnlen_user(const char __user *s, long n)
1480 {
1481         long res;
1482
1483         might_fault();
1484         if (eva_kernel_access()) {
1485                 __asm__ __volatile__(
1486                         "move\t$4, %1\n\t"
1487                         "move\t$5, %2\n\t"
1488                         __MODULE_JAL(__strnlen_kernel_asm)
1489                         "move\t%0, $2"
1490                         : "=r" (res)
1491                         : "r" (s), "r" (n)
1492                         : "$2", "$4", "$5", __UA_t0, "$31");
1493         } else {
1494                 __asm__ __volatile__(
1495                         "move\t$4, %1\n\t"
1496                         "move\t$5, %2\n\t"
1497                         __MODULE_JAL(__strnlen_user_asm)
1498                         "move\t%0, $2"
1499                         : "=r" (res)
1500                         : "r" (s), "r" (n)
1501                         : "$2", "$4", "$5", __UA_t0, "$31");
1502         }
1503
1504         return res;
1505 }
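/*
 * Usage sketch (illustrative only; name is hypothetical): strnlen_user()
 * returns 0 on a fault and a value greater than the limit when no NUL was
 * found within it, so the caller must handle both cases.
 *
 *	static int example_check_str(const char __user *ustr, long maxlen)
 *	{
 *		long len = strnlen_user(ustr, maxlen);
 *
 *		if (!len)
 *			return -EFAULT;		// fault while reading userspace
 *		if (len > maxlen)
 *			return -E2BIG;		// no NUL within maxlen bytes
 *		return 0;
 *	}
 */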
1506
1507 #endif /* _ASM_UACCESS_H */