Merge branch 'uaccess' (batched user access infrastructure)
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 21 Jan 2016 21:02:41 +0000 (13:02 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 21 Jan 2016 21:02:41 +0000 (13:02 -0800)
Expose an interface that lets callers mark several accesses together as
user space accesses, so that the surrounding user space access markers
(SMAP on x86, PAN on arm64, domain register switching on arm) can be
batched.
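
As a minimal sketch of the intended calling convention (the function
name and arguments here are hypothetical, and note that as of this
merge the unsafe accessors return an error value rather than taking a
fault label):

  static int put_two_words(unsigned long __user *dst,
                           unsigned long a, unsigned long b)
  {
          /* The range check is still the caller's job */
          if (!access_ok(VERIFY_WRITE, dst, 2 * sizeof(unsigned long)))
                  return -EFAULT;

          user_access_begin();            /* one STAC covers both writes */
          if (unlikely(unsafe_put_user(a, dst)))
                  goto efault;
          if (unlikely(unsafe_put_user(b, dst + 1)))
                  goto efault;
          user_access_end();              /* one CLAC */
          return 0;

  efault:
          user_access_end();
          return -EFAULT;
  }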

This is currently only used for the user string length and copying
functions, where the SMAP overhead on x86 drowned out the actual user
accesses (only noticeable on newer microarchitectures that support SMAP
in the first place, of course).
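
Simplified, the batched string handling now looks roughly like this
(the real lib/strnlen_user.c works a word at a time; this
byte-at-a-time loop and its return convention are simplified for
illustration):

  static long strnlen_user_sketch(const char __user *str, long count)
  {
          long res = 0;

          if (!access_ok(VERIFY_READ, str, 1))
                  return 0;

          user_access_begin();            /* one STAC for the whole loop */
          while (res < count) {
                  char c;

                  if (unlikely(unsafe_get_user(c, str + res))) {
                          res = 0;        /* fault: strnlen_user() returns 0 */
                          break;
                  }
                  res++;
                  if (!c)                 /* length includes the NUL */
                          break;
          }
          user_access_end();
          return res;
  }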

* user access batching branch:
  Use the new batched user accesses in generic user string handling
  Add 'unsafe' user access functions for batched accesses
  x86: reorganize SMAP handling in user space accesses

arch/x86/include/asm/uaccess.h

@@@ -745,14 -762,30 +762,39 @@@ copy_to_user(void __user *to, const voi
  #undef __copy_from_user_overflow
  #undef __copy_to_user_overflow
  
 +/*
 + * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 + * nested NMI paths are careful to preserve CR2.
 + *
 + * Caller must use pagefault_enable/disable, or run in interrupt context,
 + * and also do an access_ok() check
 + */
 +#define __copy_from_user_nmi __copy_from_user_inatomic
 +
+ /*
+  * The "unsafe" user accesses aren't really "unsafe", but the naming
+  * is a big fat warning: you have to not only do the access_ok()
+  * checking before using them, but you have to surround them with the
+  * user_access_begin/end() pair.
+  */
+ #define user_access_begin()   __uaccess_begin()
+ #define user_access_end()     __uaccess_end()
+ #define unsafe_put_user(x, ptr)                                               \
+ ({                                                                            \
+       int __pu_err;                                                           \
+       __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);         \
+       __builtin_expect(__pu_err, 0);                                          \
+ })
+ #define unsafe_get_user(x, ptr)                                               \
+ ({                                                                            \
+       int __gu_err;                                                           \
+       unsigned long __gu_val;                                                 \
+       __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);    \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
+       __builtin_expect(__gu_err, 0);                                          \
+ })
  #endif /* _ASM_X86_UACCESS_H */
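
For completeness, an illustrative caller of the __copy_from_user_nmi()
alias defined above, following the rules its comment spells out:
disable page faults (or already run in interrupt context) and do the
access_ok() check first. The function name and arguments here are
hypothetical:

  static unsigned long read_user_word(const unsigned long __user *src,
                                      unsigned long *val)
  {
          unsigned long ret;

          if (!access_ok(VERIFY_READ, src, sizeof(*src)))
                  return sizeof(*src);    /* nothing copied */

          pagefault_disable();
          ret = __copy_from_user_nmi(val, src, sizeof(*val));
          pagefault_enable();

          return ret;     /* 0 on success, bytes left uncopied on fault */
  }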