Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial
[cascardo/linux.git] / arch/x86/mm/init.c
index 452f904..649da47 100644
 
 #include "mm_internal.h"
 
+/*
+ * Tables translating between page_cache_type_t and pte encoding.
+ * The minimal set of supported modes is defined statically and is
+ * modified if more supported cache modes become available.
+ * The index into __cachemode2pte_tbl is the cache mode.
+ * The index into __pte2cachemode_tbl is built from the caching attribute
+ * bits of the pte (_PAGE_PWT, _PAGE_PCD, _PAGE_PAT) at index bit
+ * positions 0, 1, 2.
+ */
+uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
+       [_PAGE_CACHE_MODE_WB]           = 0,
+       [_PAGE_CACHE_MODE_WC]           = _PAGE_PWT,
+       [_PAGE_CACHE_MODE_UC_MINUS]     = _PAGE_PCD,
+       [_PAGE_CACHE_MODE_UC]           = _PAGE_PCD | _PAGE_PWT,
+       [_PAGE_CACHE_MODE_WT]           = _PAGE_PCD,
+       [_PAGE_CACHE_MODE_WP]           = _PAGE_PCD,
+};
+EXPORT_SYMBOL(__cachemode2pte_tbl);
+uint8_t __pte2cachemode_tbl[8] = {
+       [__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
+       [__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
+       [__pte2cm_idx(_PAGE_PCD)] = _PAGE_CACHE_MODE_UC_MINUS,
+       [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD)] = _PAGE_CACHE_MODE_UC,
+       [__pte2cm_idx(_PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
+       [__pte2cm_idx(_PAGE_PWT | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
+       [__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
+       [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
+};
+EXPORT_SYMBOL(__pte2cachemode_tbl);
+
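For illustration, here is a standalone userspace sketch of the index packing described in the comment above: the PWT, PCD and PAT pte bits (x86 bit positions 3, 4 and 7, i.e. _PAGE_BIT_PWT, _PAGE_BIT_PCD and _PAGE_BIT_PAT) are compressed into a 3-bit index for __pte2cachemode_tbl and unpacked again. The helpers mirror what __pte2cm_idx()/__cm_idx2pte() do, but are reimplemented here as a sketch rather than copied from the kernel headers.

	/* Userspace sketch only: pack the PWT/PCD/PAT pte bits into a
	 * 0..7 table index and unpack an index back into pte bits,
	 * using the x86 bit positions 3 (PWT), 4 (PCD), 7 (PAT). */
	#include <stdint.h>
	#include <stdio.h>

	#define PWT_BIT 3	/* _PAGE_BIT_PWT */
	#define PCD_BIT 4	/* _PAGE_BIT_PCD */
	#define PAT_BIT 7	/* _PAGE_BIT_PAT */

	static unsigned int pte2cm_idx(uint64_t cache_bits)
	{
		return ((cache_bits >> (PAT_BIT - 2)) & 4) |	/* PAT -> index bit 2 */
		       ((cache_bits >> (PCD_BIT - 1)) & 2) |	/* PCD -> index bit 1 */
		       ((cache_bits >> PWT_BIT) & 1);		/* PWT -> index bit 0 */
	}

	static uint64_t cm_idx2pte(unsigned int idx)
	{
		return ((uint64_t)(idx & 4) << (PAT_BIT - 2)) |
		       ((uint64_t)(idx & 2) << (PCD_BIT - 1)) |
		       ((uint64_t)(idx & 1) << PWT_BIT);
	}

	int main(void)
	{
		/* Round-trip all eight attribute combinations. */
		for (unsigned int i = 0; i < 8; i++)
			printf("idx %u <-> pte bits %#llx -> idx %u\n", i,
			       (unsigned long long)cm_idx2pte(i),
			       pte2cm_idx(cm_idx2pte(i)));
		return 0;
	}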
 static unsigned long __initdata pgt_buf_start;
 static unsigned long __initdata pgt_buf_end;
 static unsigned long __initdata pgt_buf_top;
@@ -409,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
        /*
-        * Explain why we shift by 5 and why we don't have to worry about
-        * 'step_size << 5' overflowing:
-        *
-        * initial mapped size is PMD_SIZE (2M).
+        * Initial mapped size is PMD_SIZE (2M).
         * We cannot set step_size to be PUD_SIZE (1G) yet.
         * In the worst case, when we cross the 1G boundary and
         * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-        * to map 1G range with PTE. Use 5 as shift for now.
+        * to map a 1G range with PTEs. Hence we use one less than the
+        * difference of the page table level shifts.
         *
-        * Don't need to worry about overflow, on 32bit, when step_size
-        * is 0, round_down() returns 0 for start, and that turns it
-        * into 0x100000000ULL.
+        * There is no need to worry about overflow in the top-down case:
+        * on 32bit, when step_size is 0, round_down() returns 0 for start,
+        * and that turns it into 0x100000000ULL.
+        * In the bottom-up case, round_up(x, 0) returns 0 too, which the
+        * code below needs to take into account.
         */
-       return step_size << 5;
+       return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
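To put numbers on the new return value: with 4K pages and 64-bit (or PAE) paging, PMD_SHIFT - PAGE_SHIFT - 1 = 21 - 12 - 1 = 8, so each call multiplies step_size by 256 (2M -> 512M -> 128G -> ...). The "one less" keeps a safety margin: one 4K page of PTEs (512 entries) maps 2M, so step_size bytes of already-mapped RAM can hold page tables covering step_size << 9 bytes; growing by only << 8 leaves a factor-of-two headroom for the extra PMD/PUD pages counted in the 1+1+512 example above.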
 
 /**
@@ -442,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
        unsigned long step_size;
        unsigned long addr;
        unsigned long mapped_ram_size = 0;
-       unsigned long new_mapped_ram_size;
 
        /* Xen has a big reserved range near the end of RAM; skip it at first. */
        addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -467,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
                                start = map_start;
                } else
                        start = map_start;
-               new_mapped_ram_size = init_range_memory_mapping(start,
+               mapped_ram_size += init_range_memory_mapping(start,
                                                        last_start);
                last_start = start;
                min_pfn_mapped = last_start >> PAGE_SHIFT;
-               /* only increase step_size after big range get mapped */
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 
        if (real_end < map_end)
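For intuition, here is a rough userspace simulation of the stepping scheme; the same growth test is used by memory_map_bottom_up() below. Alignment via round_up()/round_down() is simplified to plain increments, the constants are the x86-64 values, and the whole range is assumed to be RAM; this is a sketch, not the kernel loop.

	/* Userspace sketch (not kernel code): show how step_size and
	 * mapped_ram_size evolve under the new growth test. */
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PMD_SIZE	(1ULL << PMD_SHIFT)

	static uint64_t get_new_step_size(uint64_t step_size)
	{
		return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
	}

	int main(void)
	{
		uint64_t map_end = 64ULL << 30;		/* pretend 64G of RAM */
		uint64_t start = 0, step_size = PMD_SIZE;
		uint64_t mapped_ram_size = 0;

		while (start < map_end) {
			uint64_t next = (step_size && map_end - start > step_size)
					? start + step_size : map_end;

			/* Stands in for init_range_memory_mapping();
			 * assumes the whole chunk is mappable RAM. */
			mapped_ram_size += next - start;
			start = next;

			/* Grow only once enough RAM is mapped to hold the
			 * page tables for the next, bigger chunk. */
			if (mapped_ram_size >= step_size)
				step_size = get_new_step_size(step_size);

			printf("mapped %#llx, step_size now %#llx\n",
			       (unsigned long long)mapped_ram_size,
			       (unsigned long long)step_size);
		}
		return 0;
	}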
@@ -495,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 static void __init memory_map_bottom_up(unsigned long map_start,
                                        unsigned long map_end)
 {
-       unsigned long next, new_mapped_ram_size, start;
+       unsigned long next, start;
        unsigned long mapped_ram_size = 0;
        /* step_size needs to be small so that pgt_buf from BRK can cover it */
        unsigned long step_size = PMD_SIZE;
@@ -510,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
         * for page table.
         */
        while (start < map_end) {
-               if (map_end - start > step_size) {
+               if (step_size && map_end - start > step_size) {
                        next = round_up(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
-               } else
+               } else {
                        next = map_end;
+               }
 
-               new_mapped_ram_size = init_range_memory_mapping(start, next);
+               mapped_ram_size += init_range_memory_mapping(start, next);
                start = next;
 
-               if (new_mapped_ram_size > mapped_ram_size)
+               if (mapped_ram_size >= step_size)
                        step_size = get_new_step_size(step_size);
-               mapped_ram_size += new_mapped_ram_size;
        }
 }
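The new step_size check above is what the round_up(x, 0) remark in get_new_step_size() is about: growing by << 8 from 2M gives 2^21, 2^29, 2^37, and on 32bit the last shift overflows an unsigned long to 0. round_up(start + 1, 0) would then return 0 and send the loop backwards; treating a zero step_size as "map everything that is left" sidesteps the overflow.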
 
@@ -674,10 +700,10 @@ void __init zone_sizes_init(void)
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 
 #ifdef CONFIG_ZONE_DMA
-       max_zone_pfns[ZONE_DMA]         = MAX_DMA_PFN;
+       max_zone_pfns[ZONE_DMA]         = min(MAX_DMA_PFN, max_low_pfn);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-       max_zone_pfns[ZONE_DMA32]       = MAX_DMA32_PFN;
+       max_zone_pfns[ZONE_DMA32]       = min(MAX_DMA32_PFN, max_low_pfn);
 #endif
        max_zone_pfns[ZONE_NORMAL]      = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
@@ -687,3 +713,11 @@ void __init zone_sizes_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
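A concrete example of why the min() clamps matter: on a machine with 512M of RAM, max_low_pfn is 0x20000 while MAX_DMA32_PFN is 0x100000 (4G >> PAGE_SHIFT). Without the clamp, ZONE_DMA32 would be sized past the end of physical memory; with it, each zone boundary is capped at the highest page frame that actually exists.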
 
+void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
+{
+       /* entry 0 MUST be WB (hardwired to speed up translations) */
+       BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
+
+       __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
+       __pte2cachemode_tbl[entry] = cache;
+}
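Finally, a hedged sketch of how a PAT initialisation path might use the new hook; the entry/mode pairing below is an assumption for illustration, not the actual pat_init() sequence:

	/* Hypothetical caller: after reprogramming the PAT MSR so that
	 * PAT entry 1 selects write-combining, publish the encoding.
	 * Entry 0 must stay WB, as the BUG_ON above enforces. */
	update_cache_mode_entry(1, _PAGE_CACHE_MODE_WC);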