x86: remove more uaccess_32.h complexity
author	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 23 May 2016 00:21:27 +0000 (17:21 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Mon, 23 May 2016 00:21:27 +0000 (17:21 -0700)
I'm looking into possibly merging the 32-bit and 64-bit versions
of the x86 uaccess.h implementation, but first the 32-bit side needs
to be cleaned up.

For example, the 32-bit version of "__copy_from_user_inatomic()"
consists mostly of special cases for constant sizes, and those cases
are almost never relevant.  Most callers don't pass a constant size
anyway, and the few that do small constant copies are better off
just using __get_user() instead.

So get rid of the unnecessary complexity.
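
As a sketch of the conversion pattern (the helper read_user_u32() is
a made-up name for illustration; it mirrors the futex change below),
a small fixed-size atomic read of user memory becomes a single
__get_user() call:

static int read_user_u32(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	/* was: ret = __copy_from_user_inatomic(dest, from, sizeof(u32)); */
	ret = __get_user(*dest, from);
	pagefault_enable();

	/*
	 * Both forms return nonzero on a fault, so callers that only
	 * check "ret != 0" need no other change.
	 */
	return ret ? -EFAULT : 0;
}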

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/uaccess_32.h
kernel/events/uprobes.c
kernel/futex.c
mm/maccess.c

diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 537cc88..4b32da2 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -65,32 +65,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long n)
 static __always_inline unsigned long
 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
 {
-       /* Avoid zeroing the tail if the copy fails..
-        * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
-        * but as the zeroing behaviour is only significant when n is not
-        * constant, that shouldn't be a problem.
-        */
-       if (__builtin_constant_p(n)) {
-               unsigned long ret;
-
-               switch (n) {
-               case 1:
-                       __uaccess_begin();
-                       __get_user_size(*(u8 *)to, from, 1, ret, 1);
-                       __uaccess_end();
-                       return ret;
-               case 2:
-                       __uaccess_begin();
-                       __get_user_size(*(u16 *)to, from, 2, ret, 2);
-                       __uaccess_end();
-                       return ret;
-               case 4:
-                       __uaccess_begin();
-                       __get_user_size(*(u32 *)to, from, 4, ret, 4);
-                       __uaccess_end();
-                       return ret;
-               }
-       }
        return __copy_from_user_ll_nozero(to, from, n);
 }
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7edc95e..c01f733 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1694,8 +1694,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
        int result;
 
        pagefault_disable();
-       result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
-                                                       sizeof(opcode));
+       result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
        pagefault_enable();
 
        if (likely(result == 0))
diff --git a/kernel/futex.c b/kernel/futex.c
index c20f06f..ee25f5b 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -729,7 +729,7 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
        int ret;
 
        pagefault_disable();
-       ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
+       ret = __get_user(*dest, from);
        pagefault_enable();
 
        return ret ? -EFAULT : 0;
diff --git a/mm/maccess.c b/mm/maccess.c
index d159b1c..78f9274 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -96,8 +96,7 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count)
        pagefault_disable();
 
        do {
-               ret = __copy_from_user_inatomic(dst++,
-                                               (const void __user __force *)src++, 1);
+               ret = __get_user(*dst++, (const char __user __force *)src++);
        } while (dst[-1] && ret == 0 && src - unsafe_addr < count);
 
        dst[-1] = '\0';
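
A hedged usage sketch of the converted loop (the caller, buffer size,
and unsafe_ptr are hypothetical; the return convention assumed here is
the one this version of mm/maccess.c uses, namely -EFAULT on a fault
and otherwise the number of source bytes consumed):

static void dump_unsafe_string(const void *unsafe_ptr)
{
	char buf[64];
	long ret = strncpy_from_unsafe(buf, unsafe_ptr, sizeof(buf));

	if (ret < 0)
		pr_warn("unreadable string at %p\n", unsafe_ptr);	/* faulted before a NUL */
	else
		pr_info("copied %ld bytes: %s\n", ret, buf);	/* buf is NUL-terminated */
}

The byte-at-a-time __get_user() in the loop keeps the same fault-safe
behaviour as the old one-byte __copy_from_user_inatomic(), just
without a call into the generic copy routine for every byte.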