arch/mips/include/asm/uaccess.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <linux/string.h>
#include <asm/asm-eva.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_32BIT

#ifdef CONFIG_KVM_GUEST
#define __UA_LIMIT 0x40000000UL
#else
#define __UA_LIMIT 0x80000000UL
#endif

#define __UA_ADDR       ".word"
#define __UA_LA         "la"
#define __UA_ADDU       "addu"
#define __UA_t0         "$8"
#define __UA_t1         "$9"

#endif /* CONFIG_32BIT */

#ifdef CONFIG_64BIT

extern u64 __ua_limit;

#define __UA_LIMIT      __ua_limit

#define __UA_ADDR       ".dword"
#define __UA_LA         "dla"
#define __UA_ADDU       "daddu"
#define __UA_t0         "$12"
#define __UA_t1         "$13"

#endif /* CONFIG_64BIT */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#ifdef CONFIG_KVM_GUEST
#define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
#define USER_DS         ((mm_segment_t) { 0xC0000000UL })
#else
#define KERNEL_DS       ((mm_segment_t) { 0UL })
#define USER_DS         ((mm_segment_t) { __UA_LIMIT })
#endif

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)        ((a).seg == (b).seg)

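/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily lifting the address limit so that the *_user accessors will
 * accept kernel pointers.  "old_fs" and the helper are made-up names for
 * the example.
 *
 *         mm_segment_t old_fs = get_fs();
 *
 *         set_fs(KERNEL_DS);
 *         err = read_something_via_user_accessors(kernel_buf);
 *         set_fs(old_fs);
 */
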
/*
 * eva_kernel_access() - determine whether kernel memory access on an EVA system
 *
 * Determines whether memory accesses should be performed to kernel memory
 * on a system using Extended Virtual Addressing (EVA).
 *
 * Return: true if a kernel memory access on an EVA system, else false.
 */
static inline bool eva_kernel_access(void)
{
        if (!IS_ENABLED(CONFIG_EVA))
                return false;

        return segment_eq(get_fs(), get_ds());
}

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)                                                 \
        ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)                                   \
({                                                                      \
        unsigned long __addr = (unsigned long) (addr);                  \
        unsigned long __size = size;                                    \
        unsigned long __mask = mask;                                    \
        unsigned long __ok;                                             \
                                                                        \
        __chk_user_ptr(addr);                                           \
        __ok = (signed long)(__mask & (__addr | (__addr + __size) |     \
                __ua_size(__size)));                                    \
        __ok == 0;                                                      \
})

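/*
 * Worked example (illustrative only; 32-bit, non-KVM, so the USER_DS
 * mask is 0x80000000): for addr = 0x7fffff00 and a non-constant
 * size = 0x200, addr + size = 0x80000100 and
 *
 *         0x80000000 & (0x7fffff00 | 0x80000100 | 0x200) != 0
 *
 * so the range is rejected, while under KERNEL_DS (mask 0) the same
 * check passes whatever the address.
 */
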
#define access_ok(type, addr, size)                                     \
        likely(__access_ok((addr), (size), __access_mask))

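/*
 * Usage sketch (hypothetical caller, not part of this header): validate
 * a whole user buffer once before a sequence of unchecked accesses;
 * "ubuf" and "len" are made-up names.
 *
 *         if (!access_ok(VERIFY_WRITE, ubuf, len))
 *                 return -EFAULT;
 *         ... __put_user()/__copy_to_user() on ubuf may follow ...
 */
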
/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr) \
        __put_user_check((x), (ptr), sizeof(*(ptr)))

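/*
 * Example (illustrative sketch): returning a single status value to
 * userspace, e.g. from an ioctl handler; "status" and the
 * "int __user *argp" argument are hypothetical.
 *
 *         if (put_user(status, argp))
 *                 return -EFAULT;
 */
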
/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
        __get_user_check((x), (ptr), sizeof(*(ptr)))

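/*
 * Example (illustrative sketch): fetching one value from userspace;
 * "uaddr" is a hypothetical "unsigned int __user *".  On failure the
 * local variable has been zeroed.
 *
 *         unsigned int val;
 *
 *         if (get_user(val, uaddr))
 *                 return -EFAULT;
 */
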
/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
        __put_user_nocheck((x), (ptr), sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))

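/*
 * Sketch of the intended pattern for the unchecked variants (all names
 * hypothetical): pay for access_ok() once, then use __get_user() for
 * each element.
 *
 *         if (!access_ok(VERIFY_READ, uarray, n * sizeof(*uarray)))
 *                 return -EFAULT;
 *         for (i = 0; i < n; i++)
 *                 if (__get_user(kbuf[i], uarray + i))
 *                         return -EFAULT;
 */
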
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifndef CONFIG_EVA
#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
#else
/*
 * Kernel specific functions for EVA. We need to use normal load instructions
 * to read data from kernel when operating in EVA mode. We use these macros to
 * avoid redefining __get_data_asm for EVA.
 */
#undef _loadd
#undef _loadw
#undef _loadh
#undef _loadb
#ifdef CONFIG_32BIT
#define _loadd                  _loadw
#else
#define _loadd(reg, addr)       "ld " reg ", " addr
#endif
#define _loadw(reg, addr)       "lw " reg ", " addr
#define _loadh(reg, addr)       "lh " reg ", " addr
#define _loadb(reg, addr)       "lb " reg ", " addr

#define __get_kernel_common(val, size, ptr)                             \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, _loadb, ptr); break;                \
        case 2: __get_data_asm(val, _loadh, ptr); break;                \
        case 4: __get_data_asm(val, _loadw, ptr); break;                \
        case 8: __GET_DW(val, _loadd, ptr); break;                      \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif

extern void __get_user_unknown(void);

#define __get_user_common(val, size, ptr)                               \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, user_lb, ptr); break;               \
        case 2: __get_data_asm(val, user_lh, ptr); break;               \
        case 4: __get_data_asm(val, user_lw, ptr); break;               \
        case 8: __GET_DW(val, user_ld, ptr); break;                     \
        default: __get_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __get_user_nocheck(x, ptr, size)                                \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        if (eva_kernel_access()) {                                      \
                __get_kernel_common((x), size, ptr);                    \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __get_user_common((x), size, ptr);                      \
        }                                                               \
        __gu_err;                                                       \
})

#define __get_user_check(x, ptr, size)                                  \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
                if (eva_kernel_access())                                \
                        __get_kernel_common((x), size, __gu_ptr);       \
                else                                                    \
                        __get_user_common((x), size, __gu_ptr);         \
        } else                                                          \
                (x) = 0;                                                \
                                                                        \
        __gu_err;                                                       \
})

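/*
 * The accessors below follow the usual kernel exception-table pattern:
 * the access at label 1: may fault, and the entry emitted into
 * __ex_table (faulting address 1b, fixup address 3b) lets the fault
 * handler resume at the .fixup code, which sets the error code to
 * -EFAULT, zeroes the destination register and jumps back past the
 * access.  Nothing is added to the non-faulting fast path.
 */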
#define __get_data_asm(val, insn, addr)                                 \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     "insn("%1", "%3")"                              \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_data_asm_ll32(val, insn, addr)                            \
{                                                                       \
        union {                                                         \
                unsigned long long      l;                              \
                __typeof__(*(addr))     t;                              \
        } __gu_tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn("%1", "(%3)")"                           \n"     \
        "2:     " insn("%D1", "4(%3)")"                         \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
                                                                        \
        (val) = __gu_tmp.t;                                             \
}

#ifndef CONFIG_EVA
#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
#else
/*
 * Kernel specific functions for EVA. We need to use normal store instructions
 * to write data to kernel when operating in EVA mode. We use these macros to
 * avoid redefining __put_data_asm for EVA.
 */
#undef _stored
#undef _storew
#undef _storeh
#undef _storeb
#ifdef CONFIG_32BIT
#define _stored                 _storew
#else
#define _stored(reg, addr)      "sd " reg ", " addr
#endif

#define _storew(reg, addr)      "sw " reg ", " addr
#define _storeh(reg, addr)      "sh " reg ", " addr
#define _storeb(reg, addr)      "sb " reg ", " addr

#define __put_kernel_common(ptr, size)                                  \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(_storeb, ptr); break;                    \
        case 2: __put_data_asm(_storeh, ptr); break;                    \
        case 4: __put_data_asm(_storew, ptr); break;                    \
        case 8: __PUT_DW(_stored, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)
#endif

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif

#define __put_user_common(ptr, size)                                    \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm(user_sb, ptr); break;                    \
        case 2: __put_data_asm(user_sh, ptr); break;                    \
        case 4: __put_data_asm(user_sw, ptr); break;                    \
        case 8: __PUT_DW(user_sd, ptr); break;                          \
        default: __put_user_unknown(); break;                           \
        }                                                               \
} while (0)

#define __put_user_nocheck(x, ptr, size)                                \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        if (eva_kernel_access()) {                                      \
                __put_kernel_common(ptr, size);                         \
        } else {                                                        \
                __chk_user_ptr(ptr);                                    \
                __put_user_common(ptr, size);                           \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        might_fault();                                                  \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
                if (eva_kernel_access())                                \
                        __put_kernel_common(__pu_addr, size);           \
                else                                                    \
                        __put_user_common(__pu_addr, size);             \
        }                                                               \
                                                                        \
        __pu_err;                                                       \
})

#define __put_data_asm(insn, ptr)                                       \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_data_asm_ll32(insn, ptr)                                  \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
        "2:     "insn("%D2", "4(%3)")"                          \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unknown(void);

/*
 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
 * EVA unaligned access is handled in the ADE exception handler.
 */
#ifndef CONFIG_EVA
/*
 * put_user_unaligned: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user_unaligned(x,ptr)       \
        __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user_unaligned: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user_unaligned(x,ptr) \
        __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))

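/*
 * Example (illustrative sketch): reading a 32-bit value from a user
 * pointer that may be misaligned, e.g. a field of a packed structure;
 * "uptr" is a hypothetical "u32 __user *".
 *
 *         u32 val;
 *
 *         if (get_user_unaligned(val, uptr))
 *                 return -EFAULT;
 */
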
/*
 * __put_user_unaligned: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user_unaligned(x,ptr) \
        __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user_unaligned: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user_unaligned(x,ptr) \
        __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
#define __GET_USER_UNALIGNED_DW(val, ptr)                               \
        __get_user_unaligned_asm(val, "uld", ptr)
#endif

extern void __get_user_unaligned_unknown(void);

#define __get_user_unaligned_common(val, size, ptr)                     \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __get_data_asm(val, "lb", ptr); break;                  \
        case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;       \
        case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;       \
        case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
        default: __get_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __get_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        int __gu_err;                                                   \
                                                                        \
        __get_user_unaligned_common((x), size, ptr);                    \
        __gu_err;                                                       \
})

#define __get_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        int __gu_err = -EFAULT;                                         \
        const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
                                                                        \
        if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
                __get_user_unaligned_common((x), size, __gu_ptr);       \
                                                                        \
        __gu_err;                                                       \
})

#define __get_data_unaligned_asm(val, insn, addr)                       \
{                                                                       \
        long __gu_tmp;                                                  \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %1, %3                          \n"     \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section .fixup,\"ax\"                          \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section __ex_table,\"a\"                       \n"     \
        "       "__UA_ADDR "\t1b, 3b                            \n"     \
        "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=r" (__gu_tmp)                              \
        : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
                                                                        \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_unaligned_asm_ll32(val, addr)                        \
{                                                                       \
        unsigned long long __gu_tmp;                                    \
                                                                        \
        __asm__ __volatile__(                                           \
        "1:     ulw     %1, (%3)                                \n"     \
        "2:     ulw     %D1, 4(%3)                              \n"     \
        "       move    %0, $0                                  \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       move    %1, $0                                  \n"     \
        "       move    %D1, $0                                 \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous                                       \n"     \
        : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
        : "0" (0), "r" (addr), "i" (-EFAULT));                          \
        (val) = (__typeof__(*(addr))) __gu_tmp;                         \
}

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef CONFIG_32BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
#endif

#define __put_user_unaligned_common(ptr, size)                          \
do {                                                                    \
        switch (size) {                                                 \
        case 1: __put_data_asm("sb", ptr); break;                       \
        case 2: __put_user_unaligned_asm("ush", ptr); break;            \
        case 4: __put_user_unaligned_asm("usw", ptr); break;            \
        case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
        default: __put_user_unaligned_unknown(); break;                 \
        }                                                               \
} while (0)

#define __put_user_unaligned_nocheck(x,ptr,size)                        \
({                                                                      \
        __typeof__(*(ptr)) __pu_val;                                    \
        int __pu_err = 0;                                               \
                                                                        \
        __pu_val = (x);                                                 \
        __put_user_unaligned_common(ptr, size);                         \
        __pu_err;                                                       \
})

#define __put_user_unaligned_check(x,ptr,size)                          \
({                                                                      \
        __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        int __pu_err = -EFAULT;                                         \
                                                                        \
        if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
                __put_user_unaligned_common(__pu_addr, size);           \
                                                                        \
        __pu_err;                                                       \
})

#define __put_user_unaligned_asm(insn, ptr)                             \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
        "2:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "3:     li      %0, %4                                  \n"     \
        "       j       2b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 3b                          \n"     \
        "       .previous                                       \n"     \
        : "=r" (__pu_err)                                               \
        : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
          "i" (-EFAULT));                                               \
}

#define __put_user_unaligned_asm_ll32(ptr)                              \
{                                                                       \
        __asm__ __volatile__(                                           \
        "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
        "2:     sw      %D2, 4(%3)                              \n"     \
        "3:                                                     \n"     \
        "       .insn                                           \n"     \
        "       .section        .fixup,\"ax\"                   \n"     \
        "4:     li      %0, %4                                  \n"     \
        "       j       3b                                      \n"     \
        "       .previous                                       \n"     \
        "       .section        __ex_table,\"a\"                \n"     \
        "       " __UA_ADDR "   1b, 4b                          \n"     \
        "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
        "       " __UA_ADDR "   2b, 4b                          \n"     \
        "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
        "       .previous"                                              \
        : "=r" (__pu_err)                                               \
        : "0" (0), "r" (__pu_val), "r" (ptr),                           \
          "i" (-EFAULT));                                               \
}

extern void __put_user_unaligned_unknown(void);
#endif

/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_LA "\t$1, " #destination "\n\t"                            \
        "jalr\t$1\n\t"                                                  \
        ".set\tat\n\t"
#else
#define __MODULE_JAL(destination)                                       \
        "jal\t" #destination "\n\t"
#endif

#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
                                              defined(CONFIG_CPU_HAS_PREFETCH))
#define DADDI_SCRATCH "$3"
#else
#define DADDI_SCRATCH "$0"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#ifndef CONFIG_EVA
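/*
 * The wrappers below pin their arguments to $4-$6 (a0-a2), the argument
 * registers of the MIPS calling convention, and jump straight into
 * __copy_user; the clobber list names the temporaries (plus $31, the
 * return address) that the handwritten copy routine may use.
 */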
#define __invoke_copy_to_user(to, from, n)                              \
({                                                                      \
        register void __user *__cu_to_r __asm__("$4");                  \
        register const void *__cu_from_r __asm__("$5");                 \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        __MODULE_JAL(__copy_user)                                       \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_to_kernel(to, from, n)                            \
        __invoke_copy_to_user(to, from, n)

#endif

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to, from, n)                                     \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        might_fault();                                                  \
        if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})

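/*
 * Example (illustrative sketch): copying out a structure whose
 * destination has already been range-checked; "uptr" and "kbuf" are
 * made-up names.  A nonzero return is the number of bytes left
 * uncopied.
 *
 *         if (!access_ok(VERIFY_WRITE, uptr, sizeof(*kbuf)))
 *                 return -EFAULT;
 *         if (__copy_to_user(uptr, kbuf, sizeof(*kbuf)))
 *                 return -EFAULT;
 */
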
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

#define __copy_to_user_inatomic(to, from, n)                            \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
                                                   __cu_len);           \
        else                                                            \
                __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
                                                 __cu_len);             \
        __cu_len;                                                       \
})

#define __copy_from_user_inatomic(to, from, n)                          \
({                                                                      \
        void *__cu_to;                                                  \
        const void __user *__cu_from;                                   \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (eva_kernel_access())                                        \
                __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
                                                              __cu_from,\
                                                              __cu_len);\
        else                                                            \
                __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
                                                            __cu_from,  \
                                                            __cu_len);  \
        __cu_len;                                                       \
})

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to, from, n)                                       \
({                                                                      \
        void __user *__cu_to;                                           \
        const void *__cu_from;                                          \
        long __cu_len;                                                  \
                                                                        \
        __cu_to = (to);                                                 \
        __cu_from = (from);                                             \
        __cu_len = (n);                                                 \
        if (eva_kernel_access()) {                                      \
                __cu_len = __invoke_copy_to_kernel(__cu_to,             \
                                                   __cu_from,           \
                                                   __cu_len);           \
        } else {                                                        \
                if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
                        might_fault();                                  \
                        __cu_len = __invoke_copy_to_user(__cu_to,       \
                                                         __cu_from,     \
                                                         __cu_len);     \
                }                                                       \
        }                                                               \
        __cu_len;                                                       \
})

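/*
 * Example (illustrative sketch): the checked variant performs the
 * access_ok() itself and returns the number of bytes it could not
 * copy, so the common idiom is simply:
 *
 *         if (copy_to_user(ubuf, kbuf, len))
 *                 return -EFAULT;
 */
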
#ifndef CONFIG_EVA

#define __invoke_copy_from_user(to, from, n)                            \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user)                                       \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel(to, from, n)                          \
        __invoke_copy_from_user(to, from, n)

/* For userland <-> userland operations */
#define ___invoke_copy_in_user(to, from, n)                             \
        __invoke_copy_from_user(to, from, n)

/* For kernel <-> kernel operations */
#define ___invoke_copy_in_kernel(to, from, n)                           \
        __invoke_copy_from_user(to, from, n)

#define __invoke_copy_from_user_inatomic(to, from, n)                   \
({                                                                      \
        register void *__cu_to_r __asm__("$4");                         \
        register const void __user *__cu_from_r __asm__("$5");          \
        register long __cu_len_r __asm__("$6");                         \
                                                                        \
        __cu_to_r = (to);                                               \
        __cu_from_r = (from);                                           \
        __cu_len_r = (n);                                               \
        __asm__ __volatile__(                                           \
        ".set\tnoreorder\n\t"                                           \
        __MODULE_JAL(__copy_user_inatomic)                              \
        ".set\tnoat\n\t"                                                \
        __UA_ADDU "\t$1, %1, %2\n\t"                                    \
        ".set\tat\n\t"                                                  \
        ".set\treorder"                                                 \
        : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
        :                                                               \
        : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
          DADDI_SCRATCH, "memory");                                     \
        __cu_len_r;                                                     \
})

#define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
        __invoke_copy_from_user_inatomic(to, from, n)
1011
1012 #else
1013
1014 /* EVA specific functions */
1015
1016 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
1017                                        size_t __n);
1018 extern size_t __copy_from_user_eva(void *__to, const void *__from,
1019                                    size_t __n);
1020 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1021                                  size_t __n);
1022 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1023
1024 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)      \
1025 ({                                                                      \
1026         register void *__cu_to_r __asm__("$4");                         \
1027         register const void __user *__cu_from_r __asm__("$5");          \
1028         register long __cu_len_r __asm__("$6");                         \
1029                                                                         \
1030         __cu_to_r = (to);                                               \
1031         __cu_from_r = (from);                                           \
1032         __cu_len_r = (n);                                               \
1033         __asm__ __volatile__(                                           \
1034         ".set\tnoreorder\n\t"                                           \
1035         __MODULE_JAL(func_ptr)                                          \
1036         ".set\tnoat\n\t"                                                \
1037         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
1038         ".set\tat\n\t"                                                  \
1039         ".set\treorder"                                                 \
1040         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1041         :                                                               \
1042         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1043           DADDI_SCRATCH, "memory");                                     \
1044         __cu_len_r;                                                     \
1045 })
1046
1047 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)        \
1048 ({                                                                      \
1049         register void *__cu_to_r __asm__("$4");                         \
1050         register const void __user *__cu_from_r __asm__("$5");          \
1051         register long __cu_len_r __asm__("$6");                         \
1052                                                                         \
1053         __cu_to_r = (to);                                               \
1054         __cu_from_r = (from);                                           \
1055         __cu_len_r = (n);                                               \
1056         __asm__ __volatile__(                                           \
1057         __MODULE_JAL(func_ptr)                                          \
1058         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
1059         :                                                               \
1060         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
1061           DADDI_SCRATCH, "memory");                                     \
1062         __cu_len_r;                                                     \
1063 })
1064
/*
 * Source or destination address is in userland.  We need to go through
 * the TLB.
 */
#define __invoke_copy_from_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)

#define __invoke_copy_from_user_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n,		\
					    __copy_user_inatomic_eva)

#define __invoke_copy_to_user(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)

#define ___invoke_copy_in_user(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)

/*
 * Source or destination address is in the kernel.  We do not need to go
 * through the TLB.
 */
#define __invoke_copy_from_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#define __invoke_copy_from_kernel_inatomic(to, from, n)			\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)

#define __invoke_copy_to_kernel(to, from, n)				\
	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)

#define ___invoke_copy_in_kernel(to, from, n)				\
	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)

#endif /* CONFIG_EVA */

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
						   __cu_len);		\
	}								\
	__cu_len;							\
})

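/*
 * Illustrative sketch (editorial, not part of the original header): with
 * __copy_from_user() the caller performs the access_ok() check itself.
 * The names "ubuf", "kbuf" and "len" are assumptions for the example.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;		(non-zero: bytes left uncopied)
 */
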
/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to, from, n)					\
({									\
	void *__cu_to;							\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
						     __cu_from,		\
						     __cu_len);		\
	} else {							\
		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
			might_fault();					\
			__cu_len = __invoke_copy_from_user(__cu_to,	\
							   __cu_from,	\
							   __cu_len);	\
		} else {						\
			memset(__cu_to, 0, __cu_len);			\
		}							\
	}								\
	__cu_len;							\
})

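/*
 * Illustrative sketch (editorial): copy_from_user() does the access_ok()
 * check itself, so a typical caller only tests the residual count.  The
 * names "example_args", "args" and "uarg" are assumptions for the
 * example.
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */
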
#define __copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		might_fault();						\
		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
						  __cu_len);		\
	}								\
	__cu_len;							\
})

#define copy_in_user(to, from, n)					\
({									\
	void __user *__cu_to;						\
	const void __user *__cu_from;					\
	long __cu_len;							\
									\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (eva_kernel_access()) {					\
		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
						    __cu_len);		\
	} else {							\
		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
			might_fault();					\
			__cu_len = ___invoke_copy_in_user(__cu_to,	\
							  __cu_from,	\
							  __cu_len);	\
		}							\
	}								\
	__cu_len;							\
})

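/*
 * Illustrative sketch (editorial): copy_in_user() moves data between two
 * userspace buffers, as compat code does when repacking structures.  The
 * names "new_ubuf", "old_ubuf" and "len" are assumptions for the
 * example.
 *
 *	if (copy_in_user(new_ubuf, old_ubuf, len))
 *		return -EFAULT;
 */
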
/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void __user *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero_kernel)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, $0\n\t"
			"move\t$6, %2\n\t"
			__MODULE_JAL(__bzero)
			"move\t%0, $6"
			: "=r" (res)
			: "r" (addr), "r" (size)
			: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
	}

	return res;
}

#define clear_user(addr, n)						\
({									\
	void __user * __cl_addr = (addr);				\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
					__cl_addr, __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})

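/*
 * Illustrative sketch (editorial): zeroing the tail of a user buffer, as
 * a read() handler might after copying fewer bytes than requested.  The
 * names "ubuf", "copied" and "count" are assumptions for the example.
 *
 *	if (clear_user(ubuf + copied, count - copied))
 *		return -EFAULT;		(non-zero: bytes left uncleared)
 */
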
/*
 * __strncpy_from_user: - Copy a NUL-terminated string from userspace, with less checking.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *          least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
__strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL-terminated string from userspace.
 * @__to:   Destination address, in kernel space.  This buffer must be at
 *          least @__len bytes long.
 * @__from: Source address, in user space.
 * @__len:  Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @__len is smaller than the length of the string, copies @__len bytes
 * and returns @__len.
 */
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			"move\t$6, %3\n\t"
			__MODULE_JAL(__strncpy_from_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (__to), "r" (__from), "r" (__len)
			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
	}

	return res;
}

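/*
 * Illustrative sketch (editorial): fetching a short NUL-terminated
 * string from userspace.  "kname" and "uname" are assumptions for the
 * example; a return value equal to the buffer size means the string was
 * truncated and is not NUL-terminated.
 *
 *	char kname[32];
 *	long len = strncpy_from_user(kname, uname, sizeof(kname));
 *
 *	if (len < 0)
 *		return len;			(-EFAULT)
 *	if (len == sizeof(kname))
 *		return -ENAMETOOLONG;		(no room for the NUL)
 */
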
/*
 * strlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char __user *s)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			__MODULE_JAL(__strlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s)
			: "$2", "$4", __UA_t0, "$31");
	}

	return res;
}

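/*
 * Illustrative sketch (editorial): strlen_user() scans with no length
 * bound, so strnlen_user() below is usually the better choice.  "ustr"
 * is an assumption for the example.
 *
 *	long size = strlen_user(ustr);	(size includes the NUL)
 *
 *	if (size == 0)
 *		return -EFAULT;		(fault while scanning)
 */
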
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char __user *s, long n)
{
	long res;

	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		might_fault();
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_nocheck_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum number of bytes to scan.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
static inline long strnlen_user(const char __user *s, long n)
{
	long res;

	might_fault();
	if (eva_kernel_access()) {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_kernel_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	} else {
		__asm__ __volatile__(
			"move\t$4, %1\n\t"
			"move\t$5, %2\n\t"
			__MODULE_JAL(__strnlen_user_asm)
			"move\t%0, $2"
			: "=r" (res)
			: "r" (s), "r" (n)
			: "$2", "$4", "$5", __UA_t0, "$31");
	}

	return res;
}

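/*
 * Illustrative sketch (editorial): bounding the scan and rejecting
 * overlong strings.  "ustr" and EXAMPLE_MAX are assumptions for the
 * example.
 *
 *	long size = strnlen_user(ustr, EXAMPLE_MAX);
 *
 *	if (size == 0)
 *		return -EFAULT;		(fault while scanning)
 *	if (size > EXAMPLE_MAX)
 *		return -E2BIG;		(longer than the bound)
 */
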
#endif /* _ASM_UACCESS_H */