x86/asm: Get rid of __read_cr4_safe()
author     Andy Lutomirski <luto@kernel.org>
           Thu, 29 Sep 2016 19:48:12 +0000 (12:48 -0700)
committer  Thomas Gleixner <tglx@linutronix.de>
           Fri, 30 Sep 2016 10:40:12 +0000 (12:40 +0200)
We use __read_cr4() and __read_cr4_safe() inconsistently.  On
CR4-less CPUs, all CR4 bits are effectively clear, so we can make
the code simpler and more robust by making __read_cr4() always fix
up faults on 32-bit kernels.

This may fix some bugs on old 486-like CPUs, but I don't have any
easy way to test that.
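
For context, the fixup relies on the kernel's exception-table
mechanism: _ASM_EXTABLE(from, to) records that a fault at `from'
should resume at `to'.  A minimal annotated sketch of the pattern
used in the diff below (the name read_cr4_or_zero() is illustrative
only; _ASM_EXTABLE and __force_order are the kernel's own
definitions):

	static inline unsigned long read_cr4_or_zero(void)
	{
		unsigned long val;

		/*
		 * "0" (0) preloads the output register with 0.  If the
		 * mov faults because CR4 does not exist, the exception
		 * table entry resumes execution at 2: without touching
		 * the register, so the preloaded 0 is what gets
		 * returned.
		 */
		asm volatile("1: mov %%cr4, %0\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b)
			     : "=r" (val), "=m" (__force_order) : "0" (0));
		return val;
	}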

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: david@saggiorato.net
Link: http://lkml.kernel.org/r/ea647033d357d9ce2ad2bbde5a631045f5052fb6.1475178370.git.luto@kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/tlbflush.h
arch/x86/kernel/paravirt.c
arch/x86/kernel/process_32.c
arch/x86/kernel/setup.c
arch/x86/power/cpu.c
arch/x86/xen/enlighten.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 2970d22..91b6f4e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -80,10 +80,6 @@ static inline unsigned long __read_cr4(void)
 {
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
 }
-static inline unsigned long __read_cr4_safe(void)
-{
-       return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
-}
 
 static inline void __write_cr4(unsigned long x)
 {
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7fa9e77..fcf243f 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -108,7 +108,6 @@ struct pv_cpu_ops {
        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);
 
-       unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);
 
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 587d791..19a2224 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -59,22 +59,19 @@ static inline void native_write_cr3(unsigned long val)
 static inline unsigned long native_read_cr4(void)
 {
        unsigned long val;
-       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
-       return val;
-}
-
-static inline unsigned long native_read_cr4_safe(void)
-{
-       unsigned long val;
-       /* This could fault if %cr4 does not exist. In x86_64, a cr4 always
-        * exists, so it will never fail. */
 #ifdef CONFIG_X86_32
+       /*
+        * This could fault if CR4 does not exist.  Non-existent CR4
+        * is functionally equivalent to CR4 == 0.  Keep it simple and pretend
+        * that CR4 == 0 on CPUs that don't have CR4.
+        */
        asm volatile("1: mov %%cr4, %0\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b)
                     : "=r" (val), "=m" (__force_order) : "0" (0));
 #else
-       val = native_read_cr4();
+       /* CR4 always exists on x86_64. */
+       asm volatile("mov %%cr4,%0\n\t" : "=r" (val), "=m" (__force_order));
 #endif
        return val;
 }
@@ -182,11 +179,6 @@ static inline unsigned long __read_cr4(void)
        return native_read_cr4();
 }
 
-static inline unsigned long __read_cr4_safe(void)
-{
-       return native_read_cr4_safe();
-}
-
 static inline void __write_cr4(unsigned long x)
 {
        native_write_cr4(x);
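
With __read_cr4() now handling the fault itself, call sites that may
run on pre-CR4 hardware need no special accessor.  Purely illustrative
usage (X86_CR4_PAE is the kernel's own constant; pae_enabled is a
hypothetical variable):

	/* Reads back 0 on a CPU with no CR4, so this test is safe there. */
	if (__read_cr4() & X86_CR4_PAE)
		pae_enabled = true;
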
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index dee8a70..6fa8594 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
-       this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
+       this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
 }
 
 /* Set in this cpu's CR4. */
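
The shadow initialized here feeds the cr4_set_bits()/cr4_clear_bits()
helpers later in this header, which update CR4 from the cached value
rather than re-reading the register.  Roughly, as a paraphrase of the
set-bits helper (not the verbatim source):

	static inline void cr4_set_bits(unsigned long mask)
	{
		unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

		if ((cr4 | mask) != cr4) {	/* skip redundant writes */
			cr4 |= mask;
			this_cpu_write(cpu_tlbstate.cr4, cr4);
			__write_cr4(cr4);
		}
	}
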
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index bef3400..bbf3d59 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -332,7 +332,6 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
-       .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
 #ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 404efdf..bd7be8e 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -90,7 +90,7 @@ void __show_regs(struct pt_regs *regs, int all)
        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
-       cr4 = __read_cr4_safe();
+       cr4 = __read_cr4();
        printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 87f2330..3aabfdc 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1137,7 +1137,7 @@ void __init setup_arch(char **cmdline_p)
         * auditing all the early-boot CR4 manipulation would be needed to
         * rule it out.
         */
-       mmu_cr4_features = __read_cr4_safe();
+       mmu_cr4_features = __read_cr4();
 
        memblock_set_current_limit(get_max_mapped());
 
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index b12c26e..53cace2 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -130,7 +130,7 @@ static void __save_processor_state(struct saved_context *ctxt)
        ctxt->cr0 = read_cr0();
        ctxt->cr2 = read_cr2();
        ctxt->cr3 = read_cr3();
-       ctxt->cr4 = __read_cr4_safe();
+       ctxt->cr4 = __read_cr4();
 #ifdef CONFIG_X86_64
        ctxt->cr8 = read_cr8();
 #endif
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b86ebb1..e2cf8fc 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1237,7 +1237,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .write_cr0 = xen_write_cr0,
 
        .read_cr4 = native_read_cr4,
-       .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,
 
 #ifdef CONFIG_X86_64