x86, VisWS: turn into generic arch, eliminate leftover files
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d5de157..4064616 100644
 #include <linux/slab.h>
 #include <linux/user.h>
 #include <linux/delay.h>
-#include <linux/highmem.h>
 
 #include <linux/kallsyms.h>
-#include <linux/edd.h>
-#include <linux/iscsi_ibft.h>
-#include <linux/kexec.h>
 #include <linux/cpufreq.h>
 #include <linux/dma-mapping.h>
 #include <linux/ctype.h>
 #include <asm/paravirt.h>
 
 #include <asm/percpu.h>
-#include <asm/sections.h>
 #include <asm/topology.h>
 #include <asm/apicdef.h>
 #ifdef CONFIG_X86_64
@@ -394,11 +389,10 @@ static void __init parse_setup_data(void)
        }
 }
 
-static void __init reserve_setup_data(void)
+static void __init e820_reserve_setup_data(void)
 {
        struct setup_data *data;
        u64 pa_data;
-       char buf[32];
        int found = 0;
 
        if (boot_params.hdr.version < 0x0209)
@@ -406,8 +400,6 @@ static void __init reserve_setup_data(void)
        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_ioremap(pa_data, sizeof(*data));
-               sprintf(buf, "setup data %x", data->type);
-               reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
                e820_update_range(pa_data, sizeof(*data)+data->len,
                         E820_RAM, E820_RESERVED_KERN);
                found = 1;
@@ -418,10 +410,29 @@ static void __init reserve_setup_data(void)
                return;
 
        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+       memcpy(&e820_saved, &e820, sizeof(struct e820map));
        printk(KERN_INFO "extended physical RAM map:\n");
        e820_print_map("reserve setup_data");
 }
 
+static void __init reserve_early_setup_data(void)
+{
+       struct setup_data *data;
+       u64 pa_data;
+       char buf[32];
+
+       if (boot_params.hdr.version < 0x0209)
+               return;
+       pa_data = boot_params.hdr.setup_data;
+       while (pa_data) {
+               data = early_ioremap(pa_data, sizeof(*data));
+               sprintf(buf, "setup data %x", data->type);
+               reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+               pa_data = data->next;
+               early_iounmap(data, sizeof(*data));
+       }
+}
+
 /*
  * --------- Crashkernel reservation ------------------------------
  */
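
For reference, the setup_data entries walked by both functions above form a singly linked list of blobs handed over by the boot loader (boot protocol 2.09 and later); each node stores the physical address of the next one, which is why the loops follow data->next until it reaches zero. A minimal sketch of the node layout, matching the bootparam header of this kernel generation (treat the exact field names as an assumption):

#include <linux/types.h>

/* One boot-loader-provided setup_data node; 'next' is a physical
 * address, 0 terminates the chain. */
struct setup_data {
        __u64 next;     /* phys addr of the next node, or 0 */
        __u32 type;     /* e.g. SETUP_E820_EXT */
        __u32 len;      /* length of the payload below */
        __u8  data[0];  /* payload follows the header */
};

e820_reserve_setup_data(), run right after parse_setup_data(), marks these ranges E820_RESERVED_KERN in the e820 map and in the freshly copied e820_saved; reserve_early_setup_data(), run after parse_early_param(), additionally keeps the early allocator away from them.
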
@@ -563,6 +574,10 @@ static int __init setup_elfcorehdr(char *arg)
 early_param("elfcorehdr", setup_elfcorehdr);
 #endif
 
+static struct x86_quirks default_x86_quirks __initdata;
+
+struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
+
 /*
  * Determine if we were loaded by an EFI loader.  If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
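
The x86_quirks indirection added here is what lets the former VisWS subarchitecture (now folded into the generic arch, per the commit subject) override pieces of the common boot path: visws_early_detect(), called in the next hunk, can repoint x86_quirks at a platform-specific table before the hooks are consulted. A hedged sketch of that pattern; the hook name mach_get_smp_config follows struct x86_quirks of this era, while probe_visws_board() is a made-up placeholder for the actual hardware probe:

/* Platform table installed instead of default_x86_quirks once the
 * board is detected; __initdata because it is only used at boot. */
static int visws_get_smp_config_stub(unsigned int early)
{
        return 1;       /* claim SMP config handling, skip the MP-table scan */
}

static struct x86_quirks visws_x86_quirks __initdata = {
        .mach_get_smp_config    = visws_get_smp_config_stub,
};

void __init visws_early_detect(void)
{
        if (!probe_visws_board())       /* hypothetical probe helper */
                return;
        x86_quirks = &visws_x86_quirks;
}
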
@@ -580,6 +595,7 @@ void __init setup_arch(char **cmdline_p)
 {
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
+       visws_early_detect();
        pre_setup_arch_hook();
        early_cpu_init();
 #else
@@ -626,6 +642,8 @@ void __init setup_arch(char **cmdline_p)
 
        setup_memory_map();
        parse_setup_data();
+       /* update the e820_saved too */
+       e820_reserve_setup_data();
 
        copy_edd();
 
@@ -656,7 +674,7 @@ void __init setup_arch(char **cmdline_p)
        parse_early_param();
 
        /* after early param, so could get panic from serial */
-       reserve_setup_data();
+       reserve_early_setup_data();
 
        if (acpi_mps_check()) {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -665,6 +683,11 @@ void __init setup_arch(char **cmdline_p)
                clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
        }
 
+#ifdef CONFIG_PCI
+       if (pci_early_dump_regs)
+               early_dump_pci_devices();
+#endif
+
        finish_e820_parsing();
 
 #ifdef CONFIG_X86_32
@@ -691,22 +714,18 @@ void __init setup_arch(char **cmdline_p)
        early_gart_iommu_check();
 #endif
 
-       e820_register_active_regions(0, 0, -1UL);
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
-       max_pfn = e820_end_of_ram();
+       max_pfn = e820_end_of_ram_pfn();
 
        /* preallocate 4k for mptable mpc */
        early_reserve_e820_mpc_new();
        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
-       if (mtrr_trim_uncached_memory(max_pfn)) {
-               remove_all_active_ranges();
-               e820_register_active_regions(0, 0, -1UL);
-               max_pfn = e820_end_of_ram();
-       }
+       if (mtrr_trim_uncached_memory(max_pfn))
+               max_pfn = e820_end_of_ram_pfn();
 
 #ifdef CONFIG_X86_32
        /* max_low_pfn get updated here */
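
With e820_end_of_ram_pfn() the highest usable page frame is computed directly from the e820 map, which is why the explicit e820_register_active_regions()/remove_all_active_ranges() dance (here and in a later hunk) can go away and the MTRR-trim fallback shrinks to a single call. Roughly it is a scan for the last E820_RAM page below some limit; a hedged sketch of that idea, not the kernel's exact implementation:

/* Sketch: find the last page frame covered by an E820_RAM entry
 * below limit_pfn, mirroring the idea behind e820_end_of_ram_pfn(). */
static unsigned long __init e820_last_ram_pfn(unsigned long limit_pfn)
{
        unsigned long last_pfn = 0;
        int i;

        for (i = 0; i < e820.nr_map; i++) {
                struct e820entry *ei = &e820.map[i];
                unsigned long start_pfn, end_pfn;

                if (ei->type != E820_RAM)
                        continue;
                start_pfn = ei->addr >> PAGE_SHIFT;
                end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;
                if (start_pfn >= limit_pfn)
                        continue;
                if (end_pfn > limit_pfn)
                        end_pfn = limit_pfn;
                if (end_pfn > last_pfn)
                        last_pfn = end_pfn;
        }
        return last_pfn;
}

e820_end_of_low_ram_pfn(), used a hunk further down, is the same scan capped at the 4 GiB boundary.
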
@@ -718,12 +737,26 @@ void __init setup_arch(char **cmdline_p)
 
        /* How many end-of-memory variables you have, grandma! */
        /* need this before calling reserve_initrd */
-       max_low_pfn = max_pfn;
+       if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
+               max_low_pfn = e820_end_of_low_ram_pfn();
+       else
+               max_low_pfn = max_pfn;
+
        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
 #endif
 
        /* max_pfn_mapped is updated here */
-       max_pfn_mapped = init_memory_mapping(0, (max_low_pfn << PAGE_SHIFT));
+       max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
+       max_pfn_mapped = max_low_pfn_mapped;
+
+#ifdef CONFIG_X86_64
+       if (max_pfn > max_low_pfn) {
+               max_pfn_mapped = init_memory_mapping(1UL<<32,
+                                                    max_pfn<<PAGE_SHIFT);
+               /* can we preserve max_low_pfn? */
+               max_low_pfn = max_pfn;
+       }
+#endif
 
        /*
         * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
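
The direct kernel mapping is now built in two steps: RAM below 4 GiB first (tracked in the new max_low_pfn_mapped), then on 64-bit any RAM above 4 GiB, extending max_pfn_mapped. The guard max_pfn > (1UL << (32 - PAGE_SHIFT)) is simply the 4 GiB boundary expressed in page frames. A small stand-alone illustration of the arithmetic; the 6 GiB memory size and 4 KiB page size are assumptions for the example, not values from the patch:

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT      12      /* 4 KiB pages, as on x86 */

int main(void)
{
        unsigned long long ram = 6ULL << 30;                    /* hypothetical 6 GiB box */
        unsigned long long max_pfn = ram >> EXAMPLE_PAGE_SHIFT; /* 0x180000 */
        unsigned long long four_gb_pfn = 1ULL << (32 - EXAMPLE_PAGE_SHIFT); /* 0x100000 */

        if (max_pfn > four_gb_pfn)
                printf("low mapping: pfns [0, %#llx), high mapping: pfns [%#llx, %#llx)\n",
                       four_gb_pfn, four_gb_pfn, max_pfn);
        else
                printf("single mapping: pfns [0, %#llx)\n", max_pfn);
        return 0;
}

On such a box the high range is mapped in the second init_memory_mapping() call, after which the code sets max_low_pfn = max_pfn; the question in the comment above is whether the lower value could be kept instead.
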
@@ -749,9 +782,6 @@ void __init setup_arch(char **cmdline_p)
         */
        acpi_boot_table_init();
 
-       /* Remove active ranges so rediscovery with NUMA-awareness happens */
-       remove_all_active_ranges();
-
 #ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
@@ -810,10 +840,6 @@ void __init setup_arch(char **cmdline_p)
         */
        acpi_boot_init();
 
-#ifdef CONFIG_X86_64
-       init_cpu_to_node();
-#endif
-
 #if defined(CONFIG_X86_MPPARSE) || defined(CONFIG_X86_VISWS)
        /*
         * get boot-time SMP configuration:
@@ -822,6 +848,11 @@ void __init setup_arch(char **cmdline_p)
                get_smp_config();
 #endif
 
+       prefill_possible_map();
+#ifdef CONFIG_X86_64
+       init_cpu_to_node();
+#endif
+
        init_apic_mappings();
        ioapic_init_mappings();
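
prefill_possible_map() is pulled in here so that the set of possible CPUs is fixed right after the firmware tables (MP table / ACPI) have been parsed, and init_cpu_to_node() moves down with it so the CPU-to-node mapping is built against that final set. A hedged sketch of what prefilling amounts to; num_processors and the cpu_set()/cpu_possible_map API reflect this kernel era, and the sizing policy is simplified:

/* Mark the first num_processors CPUs as possible so later per-CPU and
 * NUMA setup sees a stable cpu_possible_map. */
static void __init prefill_possible_map_sketch(void)
{
        int i;

        for (i = 0; i < num_processors; i++)
                cpu_set(i, cpu_possible_map);
}
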