arm64: kernel: Add support for Privileged Access Never
author James Morse <james.morse@arm.com>
Wed, 22 Jul 2015 18:05:54 +0000 (19:05 +0100)
committer Will Deacon <will.deacon@arm.com>
Mon, 27 Jul 2015 10:08:41 +0000 (11:08 +0100)
'Privileged Access Never' is a new ARMv8.1 feature which prevents
privileged code from accessing any virtual address where read or write
access is also permitted at EL0.

This patch enables the PAN feature on all CPUs, and modifies the
{get,put}_user helpers to temporarily permit access to user memory.

This will catch kernel bugs where user memory is accessed directly.
'Unprivileged loads and stores' using ldtrb et al are unaffected by PAN.
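
As an illustration of the kind of bug this catches, here is a minimal,
hypothetical sketch (not part of this patch) of a driver reading a value
from user space:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	static int read_user_val(u32 __user *uptr, u32 *out)
	{
		u32 val;

		/*
		 * Buggy: dereferencing the user pointer directly. With PAN
		 * enabled this takes a permission fault, which do_page_fault()
		 * now treats as a kernel bug (no_context) instead of silently
		 * reading user memory:
		 *
		 *	val = *uptr;
		 */

		/*
		 * Correct: get_user() clears PSTATE.PAN around the access via
		 * the ALTERNATIVE-patched SET_PSTATE_PAN(0)/SET_PSTATE_PAN(1)
		 * pair, then sets it again.
		 */
		if (get_user(val, uptr))
			return -EFAULT;

		*out = val;
		return 0;
	}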

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
[will: use ALTERNATIVE in asm and tidy up pan_enable check]
Signed-off-by: Will Deacon <will.deacon@arm.com>
14 files changed:
arch/arm64/Kconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/uaccess.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpufeature.c
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/mm/fault.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index de8dee6..c2bd79a 100644
@@ -596,6 +596,20 @@ config FORCE_MAX_ZONEORDER
        default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
        default "11"
 
+config ARM64_PAN
+       bool "Enable support for Privileged Access Never (PAN)"
+       default y
+       help
+        Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+        prevents the kernel or hypervisor from accessing user-space (EL0)
+        memory directly.
+
+        Choosing this option will cause any unprotected (not using
+        copy_to_user et al) memory access to fail with a permission fault.
+
+        The feature is detected at runtime, and will remain as a 'nop'
+        instruction if the cpu does not implement the feature.
+
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
        depends on COMPAT
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f595f7d..d71140b 100644
@@ -25,8 +25,9 @@
 #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE   1
 #define ARM64_WORKAROUND_845719                        2
 #define ARM64_HAS_SYSREG_GIC_CPUIF             3
+#define ARM64_HAS_PAN                          4
 
-#define ARM64_NCAPS                            4
+#define ARM64_NCAPS                            5
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 74069b3..775e85b 100644
 
 #include <linux/futex.h>
 #include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/errno.h>
+#include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)                \
        asm volatile(                                                   \
+       ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,            \
+                   CONFIG_ARM64_PAN)                                   \
 "1:    ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
 "2:    stlxr   %w3, %w0, %2\n"                                         \
@@ -39,6 +45,8 @@
 "      .align  3\n"                                                    \
 "      .quad   1b, 4b, 2b, 4b\n"                                       \
 "      .popsection\n"                                                  \
+       ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,            \
+                   CONFIG_ARM64_PAN)                                   \
        : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)       \
        : "r" (oparg), "Ir" (-EFAULT)                                   \
        : "memory")
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c893e..98f3235 100644
@@ -186,4 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
 
 #endif
 
+void cpu_enable_pan(void);
+
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5295bcb..a7f3d4b 100644
@@ -20,6 +20,8 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <asm/opcodes.h>
+
 #define SCTLR_EL1_CP15BEN      (0x1 << 5)
 #define SCTLR_EL1_SED          (0x1 << 8)
 
 #define sys_reg(op0, op1, crn, crm, op2) \
        ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
 
+#define REG_PSTATE_PAN_IMM                     sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN                         (1 << 23)
+
+#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+                                    (!!x)<<8 | 0x1f)
+
 #ifdef __ASSEMBLY__
 
        .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba4..b2ede96 100644
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -131,6 +134,8 @@ static inline void set_fs(mm_segment_t fs)
 do {                                                                   \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,        \
+                       CONFIG_ARM64_PAN));                             \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));   \
@@ -148,6 +153,8 @@ do {                                                                        \
                BUILD_BUG();                                            \
        }                                                               \
        (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,        \
+                       CONFIG_ARM64_PAN));                             \
 } while (0)
 
 #define __get_user(x, ptr)                                             \
@@ -194,6 +201,8 @@ do {                                                                        \
 do {                                                                   \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,        \
+                       CONFIG_ARM64_PAN));                             \
        switch (sizeof(*(ptr))) {                                       \
        case 1:                                                         \
                __put_user_asm("strb", "%w", __pu_val, (ptr), (err));   \
@@ -210,6 +219,8 @@ do {                                                                        \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
+       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,        \
+                       CONFIG_ARM64_PAN));                             \
 } while (0)
 
 #define __put_user(x, ptr)                                             \
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643..208db3d 100644
@@ -44,6 +44,7 @@
 #define PSR_I_BIT      0x00000080
 #define PSR_A_BIT      0x00000100
 #define PSR_D_BIT      0x00000200
+#define PSR_PAN_BIT    0x00400000
 #define PSR_Q_BIT      0x08000000
 #define PSR_V_BIT      0x10000000
 #define PSR_C_BIT      0x20000000
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 78d56bf..bcee7ab 100644
@@ -14,6 +14,8 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/insn.h>
 #include <asm/opcodes.h>
 #include <asm/sysreg.h>
@@ -280,6 +282,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
  */
 #define __user_swpX_asm(data, addr, res, temp, B)              \
        __asm__ __volatile__(                                   \
+       ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,    \
+                   CONFIG_ARM64_PAN)                           \
        "       mov             %w2, %w1\n"                     \
        "0:     ldxr"B"         %w1, [%3]\n"                    \
        "1:     stxr"B"         %w0, %w2, [%3]\n"               \
@@ -295,7 +299,9 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
        "       .align          3\n"                            \
        "       .quad           0b, 3b\n"                       \
        "       .quad           1b, 3b\n"                       \
-       "       .popsection"                                    \
+       "       .popsection\n"                                  \
+       ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,    \
+               CONFIG_ARM64_PAN)                               \
        : "=&r" (res), "+r" (data), "=&r" (temp)                \
        : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)              \
        : "memory")
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 74fd0f7..978fa16 100644
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
+#include <asm/processor.h>
 
 static bool
 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
@@ -39,6 +40,15 @@ has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
        return feature_matches(val, entry);
 }
 
+static bool __maybe_unused
+has_id_aa64mmfr1_feature(const struct arm64_cpu_capabilities *entry)
+{
+       u64 val;
+
+       val = read_cpuid(id_aa64mmfr1_el1);
+       return feature_matches(val, entry);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -47,6 +57,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .field_pos = 24,
                .min_field_value = 1,
        },
+#ifdef CONFIG_ARM64_PAN
+       {
+               .desc = "Privileged Access Never",
+               .capability = ARM64_HAS_PAN,
+               .matches = has_id_aa64mmfr1_feature,
+               .field_pos = 20,
+               .min_field_value = 1,
+               .enable = cpu_enable_pan,
+       },
+#endif /* CONFIG_ARM64_PAN */
        {},
 };
 
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967f..a9723c7 100644
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
        .text
 
@@ -29,6 +33,8 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        mov     x2, x1                  // save the size for fixup return
        subs    x1, x1, #8
        b.mi    2f
@@ -48,6 +54,8 @@ USER(9f, strh wzr, [x0], #2   )
        b.mi    5f
 USER(9f, strb  wzr, [x0]       )
 5:     mov     x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 47c3fa5..1be9ef2 100644
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
  *     x0 - bytes not copied
  */
 ENTRY(__copy_from_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        add     x5, x1, x2                      // upper user buffer boundary
        subs    x2, x2, #16
        b.mi    1f
@@ -56,6 +62,8 @@ USER(9f, ldrh w3, [x1], #2    )
 USER(9f, ldrb  w3, [x1]        )
        strb    w3, [x0]
 5:     mov     x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        ret
 ENDPROC(__copy_from_user)
 
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 436bcc5..1b94661 100644
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -30,6 +34,8 @@
  *     x0 - bytes not copied
  */
 ENTRY(__copy_in_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        add     x5, x0, x2                      // upper user buffer boundary
        subs    x2, x2, #16
        b.mi    1f
@@ -58,6 +64,8 @@ USER(9f, strh w3, [x0], #2    )
 USER(9f, ldrb  w3, [x1]        )
 USER(9f, strb  w3, [x0]        )
 5:     mov     x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        ret
 ENDPROC(__copy_in_user)
 
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index f5e1f52..a257b47 100644
  */
 
 #include <linux/linkage.h>
+
+#include <asm/alternative.h>
 #include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,6 +32,8 @@
  *     x0 - bytes not copied
  */
 ENTRY(__copy_to_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        add     x5, x0, x2                      // upper user buffer boundary
        subs    x2, x2, #16
        b.mi    1f
@@ -56,6 +62,8 @@ USER(9f, strh w3, [x0], #2    )
        ldrb    w3, [x1]
 USER(9f, strb  w3, [x0]        )
 5:     mov     x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+           CONFIG_ARM64_PAN)
        ret
 ENDPROC(__copy_to_user)
 
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd..ce59121 100644
 #include <linux/highmem.h>
 #include <linux/perf_event.h>
 
+#include <asm/cpufeature.h>
 #include <asm/exception.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -223,6 +225,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                mm_flags |= FAULT_FLAG_WRITE;
        }
 
+       /*
+        * PAN bit set implies the fault happened in kernel space, but not
+        * in the arch's user access functions.
+        */
+       if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+               goto no_context;
+
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -536,3 +545,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 
        return 0;
 }
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+       config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */