Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
[cascardo/linux.git] arch/x86/kernel/cpu/common.c
index 351197c..b28e526 100644
@@ -32,8 +32,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
@@ -146,32 +145,21 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-static int __init x86_xsave_setup(char *s)
+static int __init x86_mpx_setup(char *s)
 {
+       /* require an exact match without trailing characters */
        if (strlen(s))
                return 0;
-       setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-       setup_clear_cpu_cap(X86_FEATURE_AVX);
-       setup_clear_cpu_cap(X86_FEATURE_AVX2);
-       return 1;
-}
-__setup("noxsave", x86_xsave_setup);
 
-static int __init x86_xsaveopt_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-       return 1;
-}
-__setup("noxsaveopt", x86_xsaveopt_setup);
+       /* do not emit a message if the feature is not present */
+       if (!boot_cpu_has(X86_FEATURE_MPX))
+               return 1;
 
-static int __init x86_xsaves_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
+       pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
        return 1;
 }
-__setup("noxsaves", x86_xsaves_setup);
+__setup("nompx", x86_mpx_setup);
 
 #ifdef CONFIG_X86_32
 static int cachesize_override = -1;
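For background on the hunk above: __setup("nompx", x86_mpx_setup) registers the handler for boot options beginning with "nompx", and the handler receives only the remainder of the matched string. The strlen() check therefore rejects anything longer than the bare option (e.g. "nompxy"), and returning 0 tells the parameter parser the option was not consumed. A standalone user-space sketch of just this matching rule (hypothetical names, not kernel code):

#include <stdio.h>
#include <string.h>

/* mimics x86_mpx_setup(): @s is what follows the matched "nompx" prefix */
static int nompx_setup_sketch(const char *s)
{
	/* require an exact match without trailing characters */
	if (strlen(s))
		return 0;	/* "nompxy" etc.: not our option */
	printf("nompx: feature would be disabled here\n");
	return 1;		/* option consumed */
}

int main(void)
{
	printf("\"nompx\"  -> %d\n", nompx_setup_sketch(""));	/* handled */
	printf("\"nompxy\" -> %d\n", nompx_setup_sketch("y"));	/* rejected */
	return 0;
}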
@@ -184,14 +172,6 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);
 
-static int __init x86_fxsr_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_FXSR);
-       setup_clear_cpu_cap(X86_FEATURE_XMM);
-       return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
 static int __init x86_sep_setup(char *s)
 {
        setup_clear_cpu_cap(X86_FEATURE_SEP);
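The "nofxsr" option does not disappear with this hunk; it was evidently relocated along with the rest of the FPU setup code (in later kernels these handlers live under arch/x86/kernel/fpu/). It cleared X86_FEATURE_XMM alongside X86_FEATURE_FXSR because SSE state can only be saved and restored via FXSAVE/FXRSTOR, so the two have to be disabled together. setup_clear_cpu_cap() simply forces a feature bit off early enough that every later feature check sees the CPU as lacking it; a toy sketch of that idea (hypothetical names):

#include <stdio.h>

#define FEAT_FXSR	(1u << 0)	/* FXSAVE/FXRSTOR */
#define FEAT_XMM	(1u << 1)	/* SSE, depends on FXSR */

static unsigned int cpu_caps = FEAT_FXSR | FEAT_XMM;

/* analogue of setup_clear_cpu_cap(): later checks now see the bit as 0 */
static void clear_cap_sketch(unsigned int mask)
{
	cpu_caps &= ~mask;
}

int main(void)
{
	clear_cap_sketch(FEAT_FXSR);
	clear_cap_sketch(FEAT_XMM);	/* no FXSR means no usable SSE */
	printf("caps now: %#x\n", cpu_caps);
	return 0;
}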
@@ -762,7 +742,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        cpu_detect(c);
        get_cpu_vendor(c);
        get_cpu_cap(c);
-       fpu_detect(c);
+       fpu__init_system(c);
 
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
@@ -1186,8 +1166,6 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-
 /*
  * Special IST stacks which the CPU switches to when it calls
  * an IST-marked descriptor entry. Up to 7 stacks (hardware
@@ -1278,7 +1256,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
 /*
  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
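Both definitions of fpu_owner_task vanish (the 64-bit copy above, the 32-bit copy here); ownership tracking moves into the FPU code itself, where later kernels keep a per-CPU struct fpu pointer (fpu_fpregs_owner_ctx) instead of a task pointer. The variable's job is to enable a lazy fast path on context switch: if the incoming task still owns the CPU's FPU registers, nothing needs to be restored. A toy user-space sketch of that ownership test (hypothetical names, a single "CPU"):

#include <stdio.h>

struct task {
	const char *name;
	int fpu_state;	/* stand-in for the saved register image */
};

/* the kernel keeps one of these per CPU; one slot suffices here */
static struct task *fpu_owner;

static void switch_fpu_sketch(struct task *next)
{
	if (fpu_owner == next) {
		/* registers never left the CPU: lazy fast path */
		printf("%s: still owner, no restore\n", next->name);
		return;
	}
	printf("%s: restoring state %d\n", next->name, next->fpu_state);
	fpu_owner = next;
}

int main(void)
{
	struct task a = { "A", 1 }, b = { "B", 2 };

	switch_fpu_sketch(&a);	/* restore */
	switch_fpu_sketch(&a);	/* fast path */
	switch_fpu_sketch(&b);	/* restore */
	return 0;
}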
@@ -1442,7 +1419,7 @@ void cpu_init(void)
        clear_all_debug_regs();
        dbg_restore_debug_regs();
 
-       fpu_init();
+       fpu__init_cpu();
 
        if (is_uv_system())
                uv_cpu_init();
@@ -1498,7 +1475,7 @@ void cpu_init(void)
        clear_all_debug_regs();
        dbg_restore_debug_regs();
 
-       fpu_init();
+       fpu__init_cpu();
 }
 #endif
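The two fpu_init() call sites above (the 64-bit and 32-bit variants of cpu_init()) become fpu__init_cpu(), matching the fpu__init_system() call in early_identify_cpu() earlier in the diff: system-wide probing happens once at boot, while the per-CPU step runs on every CPU that is brought up, boot CPU included. A toy sketch of that once-versus-per-CPU split (hypothetical bodies, not the kernel's):

#include <stdio.h>

/* runs once on the boot CPU: probe features, size the state buffers */
static void init_system_sketch(void)
{
	printf("system: detect FPU/xstate features\n");
}

/* runs on every CPU as it comes online: program this CPU's control bits */
static void init_cpu_sketch(int cpu)
{
	printf("cpu%d: enable FPU bits in CR0/CR4\n", cpu);
}

int main(void)
{
	init_system_sketch();		/* early_identify_cpu() path */
	for (int cpu = 0; cpu < 4; cpu++)
		init_cpu_sketch(cpu);	/* cpu_init() path */
	return 0;
}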