/*
 *  arch/arm/include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/string.h>
#include <linux/thread_info.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/domain.h>
#include <asm/unified.h>
#include <asm/compiler.h>

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <asm-generic/uaccess-unaligned.h>
#else
#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
#endif

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * These two are intentionally not defined anywhere - if the kernel
 * code generates any references to them, that's a bug.
 */
extern int __get_user_bad(void);
extern int __put_user_bad(void);

/*
 * Note that this is actually 0x1,0000,0000
 */
#define KERNEL_DS	0x00000000
#define get_ds()	(KERNEL_DS)

#ifdef CONFIG_MMU

#define USER_DS		TASK_SIZE
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
	modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}

#define segment_eq(a,b)	((a) == (b))

#define __addr_ok(addr) ({ \
	unsigned long flag; \
	__asm__("cmp %2, %0; movlo %0, #0" \
		: "=&r" (flag) \
		: "0" (current_thread_info()->addr_limit), "r" (addr) \
		: "cc"); \
	(flag == 0); })

/* We use 33-bit arithmetic here... */
#define __range_ok(addr,size) ({ \
	unsigned long flag, roksum; \
	__chk_user_ptr(addr); \
	__asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
		: "=&r" (flag), "=&r" (roksum) \
		: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
		: "cc"); \
	flag; })

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 */
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
extern int __get_user_lo8(void *);
extern int __get_user_8(void *);

#define __GUP_CLOBBER_1	"lr", "cc"
#ifdef CONFIG_CPU_USE_DOMAINS
#define __GUP_CLOBBER_2	"ip", "lr", "cc"
#else
#define __GUP_CLOBBER_2 "lr", "cc"
#endif
#define __GUP_CLOBBER_4	"lr", "cc"
#define __GUP_CLOBBER_lo8 "lr", "cc"
#define __GUP_CLOBBER_8	"lr", "cc"

#define __get_user_x(__r2,__p,__e,__l,__s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%1", "r2") \
		__asmeq("%3", "r1") \
		"bl	__get_user_" #__s \
		: "=&r" (__e), "=r" (__r2) \
		: "0" (__p), "r" (__l) \
		: __GUP_CLOBBER_##__s)

/* narrowing a double-word get into a single 32bit word register: */
#ifdef __ARMEB__
#define __get_user_xb(__r2, __p, __e, __l, __s) \
	__get_user_x(__r2, __p, __e, __l, lo8)
#else
#define __get_user_xb __get_user_x
#endif

#define __get_user_check(x,p) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		register const typeof(*(p)) __user *__p asm("r0") = (p); \
		register typeof(x) __r2 asm("r2"); \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		switch (sizeof(*(__p))) { \
		case 1: \
			__get_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
			__get_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
			__get_user_x(__r2, __p, __e, __l, 4); \
			break; \
		case 8: \
			if (sizeof((x)) < 8) \
				__get_user_xb(__r2, __p, __e, __l, 4); \
			else \
				__get_user_x(__r2, __p, __e, __l, 8); \
			break; \
		default: __e = __get_user_bad(); break; \
		} \
		x = (typeof(*(p))) __r2; \
		__e; \
	})

#define get_user(x,p) \
	({ \
		might_fault(); \
		__get_user_check(x,p); \
	})
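
/*
 * Illustrative sketch (not part of this header): a typical get_user() call
 * site.  The variable and pointer names below are hypothetical.
 *
 *	int __user *uptr;	// userspace pointer, e.g. a syscall argument
 *	int val;
 *
 *	if (get_user(val, uptr))	// 0 on success, -EFAULT on a fault
 *		return -EFAULT;
 *	// val now holds the value copied from userspace; the transfer size
 *	// (1/2/4/8 bytes) is chosen from the pointer's type at compile time.
 */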

extern int __put_user_1(void *, unsigned int);
extern int __put_user_2(void *, unsigned int);
extern int __put_user_4(void *, unsigned int);
extern int __put_user_8(void *, unsigned long long);

#define __put_user_x(__r2,__p,__e,__l,__s) \
	__asm__ __volatile__ ( \
		__asmeq("%0", "r0") __asmeq("%2", "r2") \
		__asmeq("%3", "r1") \
		"bl	__put_user_" #__s \
		: "=&r" (__e) \
		: "0" (__p), "r" (__r2), "r" (__l) \
		: "ip", "lr", "cc")

#define __put_user_check(x,p) \
	({ \
		unsigned long __limit = current_thread_info()->addr_limit - 1; \
		const typeof(*(p)) __user *__tmp_p = (p); \
		register const typeof(*(p)) __r2 asm("r2") = (x); \
		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
		register unsigned long __l asm("r1") = __limit; \
		register int __e asm("r0"); \
		switch (sizeof(*(__p))) { \
		case 1: \
			__put_user_x(__r2, __p, __e, __l, 1); \
			break; \
		case 2: \
			__put_user_x(__r2, __p, __e, __l, 2); \
			break; \
		case 4: \
			__put_user_x(__r2, __p, __e, __l, 4); \
			break; \
		case 8: \
			__put_user_x(__r2, __p, __e, __l, 8); \
			break; \
		default: __e = __put_user_bad(); break; \
		} \
		__e; \
	})

#define put_user(x,p) \
	({ \
		might_fault(); \
		__put_user_check(x,p); \
	})
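
/*
 * Illustrative sketch (not part of this header): writing a single value back
 * to userspace with put_user().  Names are hypothetical.
 *
 *	int __user *uresult;
 *	int status = 0;
 *
 *	if (put_user(status, uresult))
 *		return -EFAULT;		// the store faulted on a bad address
 */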

#else /* CONFIG_MMU */

/*
 * uClinux has only one addr space, so has simplified address limits.
 */
#define USER_DS			KERNEL_DS

#define segment_eq(a,b)		(1)
#define __addr_ok(addr)		((void)(addr),1)
#define __range_ok(addr,size)	((void)(addr),0)
#define get_fs()		(KERNEL_DS)

static inline void set_fs(mm_segment_t fs)
{
}

#define get_user(x,p)	__get_user(x,p)
#define put_user(x,p)	__put_user(x,p)

#endif /* CONFIG_MMU */

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)

#define user_addr_max() \
	(segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
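
/*
 * Illustrative sketch (not part of this header): validating a user buffer
 * once with access_ok() before using the unchecked __get_user()/__put_user()
 * or __copy_*_user() variants on it.  Names are hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 *	// ubuf..ubuf+len is now known to lie below the current addr_limit,
 *	// so the cheaper __copy_to_user() may be used on that range.
 */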

/*
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (ie, don't return a value as such).
 */
#define __get_user(x,ptr) \
({ \
	long __gu_err = 0; \
	__get_user_err((x),(ptr),__gu_err); \
	__gu_err; \
})

#define __get_user_error(x,ptr,err) \
({ \
	__get_user_err((x),(ptr),err); \
	(void) 0; \
})
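
/*
 * Illustrative sketch (not part of this header): the "_error" form lets a
 * series of accesses share one error variable that is checked once at the
 * end.  The structure and field names below are hypothetical.
 *
 *	int err = 0;
 *	unsigned int a, b;
 *
 *	__get_user_error(a, &uframe->arg0, err);
 *	__get_user_error(b, &uframe->arg1, err);
 *	if (err)
 *		return -EFAULT;	// err was set to -EFAULT by a faulting access
 */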

#define __get_user_err(x,ptr,err) \
do { \
	unsigned long __gu_addr = (unsigned long)(ptr); \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
	might_fault(); \
	switch (sizeof(*(ptr))) { \
	case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
	case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
	case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
	default: (__gu_val) = __get_user_bad(); \
	} \
	(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)

#define __get_user_asm_byte(x,addr,err) \
	__asm__ __volatile__( \
	"1:	" TUSER(ldrb) "	%1,[%2],#0\n" \
	"2:\n" \
	"	.pushsection .fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	mov	%1, #0\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT) \
	: "cc")

#ifndef __ARMEB__
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
	(x) = __b1 | (__b2 << 8); \
})
#else
#define __get_user_asm_half(x,__gu_addr,err) \
({ \
	unsigned long __b1, __b2; \
	__get_user_asm_byte(__b1, __gu_addr, err); \
	__get_user_asm_byte(__b2, __gu_addr + 1, err); \
	(x) = (__b1 << 8) | __b2; \
})
#endif

#define __get_user_asm_word(x,addr,err) \
	__asm__ __volatile__( \
	"1:	" TUSER(ldr) "	%1,[%2],#0\n" \
	"2:\n" \
	"	.pushsection .fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	mov	%1, #0\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err), "=&r" (x) \
	: "r" (addr), "i" (-EFAULT) \
	: "cc")

#define __put_user(x,ptr) \
({ \
	long __pu_err = 0; \
	__put_user_err((x),(ptr),__pu_err); \
	__pu_err; \
})

#define __put_user_error(x,ptr,err) \
({ \
	__put_user_err((x),(ptr),err); \
	(void) 0; \
})

#define __put_user_err(x,ptr,err) \
do { \
	unsigned long __pu_addr = (unsigned long)(ptr); \
	__typeof__(*(ptr)) __pu_val = (x); \
	__chk_user_ptr(ptr); \
	might_fault(); \
	switch (sizeof(*(ptr))) { \
	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
	case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
	case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
	case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
	default: __put_user_bad(); \
	} \
} while (0)

#define __put_user_asm_byte(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1:	" TUSER(strb) "	%1,[%2],#0\n" \
	"2:\n" \
	"	.pushsection .fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err) \
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
	: "cc")

#ifndef __ARMEB__
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
	unsigned long __temp = (unsigned long)(x); \
	__put_user_asm_byte(__temp, __pu_addr, err); \
	__put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
})
#else
#define __put_user_asm_half(x,__pu_addr,err) \
({ \
	unsigned long __temp = (unsigned long)(x); \
	__put_user_asm_byte(__temp >> 8, __pu_addr, err); \
	__put_user_asm_byte(__temp, __pu_addr + 1, err); \
})
#endif

#define __put_user_asm_word(x,__pu_addr,err) \
	__asm__ __volatile__( \
	"1:	" TUSER(str) "	%1,[%2],#0\n" \
	"2:\n" \
	"	.pushsection .fixup,\"ax\"\n" \
	"	.align	2\n" \
	"3:	mov	%0, %3\n" \
	"	b	2b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 3b\n" \
	"	.popsection" \
	: "+r" (err) \
	: "r" (x), "r" (__pu_addr), "i" (-EFAULT) \
	: "cc")

#ifndef __ARMEB__
#define __reg_oper0	"%R2"
#define __reg_oper1	"%Q2"
#else
#define __reg_oper0	"%Q2"
#define __reg_oper1	"%R2"
#endif

#define __put_user_asm_dword(x,__pu_addr,err) \
	__asm__ __volatile__( \
 ARM(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1], #4\n"	) \
 ARM(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1]\n"	) \
 THUMB(	"1:	" TUSER(str) "	" __reg_oper1 ", [%1]\n"	) \
 THUMB(	"2:	" TUSER(str) "	" __reg_oper0 ", [%1, #4]\n"	) \
	"3:\n" \
	"	.pushsection .fixup,\"ax\"\n" \
	"	.align	2\n" \
	"4:	mov	%0, %3\n" \
	"	b	3b\n" \
	"	.popsection\n" \
	"	.pushsection __ex_table,\"a\"\n" \
	"	.align	3\n" \
	"	.long	1b, 4b\n" \
	"	.long	2b, 4b\n" \
	"	.popsection" \
	: "+r" (err), "+r" (__pu_addr) \
	: "r" (x), "i" (-EFAULT) \
	: "cc")

#ifdef CONFIG_MMU
extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
#else
#define __copy_from_user(to,from,n)	(memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to,from,n)	(memcpy((void __force *)to, from, n), 0)
#define __clear_user(addr,n)		(memset((void __force *)addr, 0, n), 0)
#endif

static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
}

static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
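
/*
 * Illustrative sketch (not part of this header): bulk transfers in a
 * hypothetical ioctl-style handler.  Both helpers return the number of
 * bytes that could NOT be copied, so 0 means complete success.
 *
 *	struct my_args karg;	// hypothetical structure
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	// ... operate on karg ...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */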

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
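
/*
 * Illustrative sketch (not part of this header): copying a NUL-terminated
 * string from userspace.  strncpy_from_user() returns the length of the
 * copied string (excluding the NUL) on success, or -EFAULT on a fault.
 * Buffer name and size are hypothetical.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -EINVAL;	// userspace string was too long / unterminated
 */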

#endif /* _ASMARM_UACCESS_H */