Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[cascardo/linux.git] arch/x86/mm/pageattr.c
index 2713be4..091934e 100644
@@ -94,12 +94,12 @@ static inline void split_page_count(int level) { }
 
 static inline unsigned long highmap_start_pfn(void)
 {
-       return __pa(_text) >> PAGE_SHIFT;
+       return __pa_symbol(_text) >> PAGE_SHIFT;
 }
 
 static inline unsigned long highmap_end_pfn(void)
 {
-       return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
+       return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
 }
 
 #endif
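
For context on the __pa() -> __pa_symbol() switch above: _text and _brk_end are kernel-image symbols, which on x86_64 live in the __START_KERNEL_map mapping rather than in the PAGE_OFFSET direct map, so the symbol-aware translation is the appropriate (and cheaper) one. A minimal sketch of the distinction, with assumed constants and illustrative names, not the kernel's actual macro bodies:

/* Illustrative only -- the real __pa()/__pa_symbol() are defined in asm/page*.h. */
#define DIRECT_MAP_BASE         0xffff880000000000UL    /* PAGE_OFFSET (assumed, x86_64)        */
#define KERNEL_IMAGE_BASE       0xffffffff80000000UL    /* __START_KERNEL_map (assumed, x86_64) */

static unsigned long pa_directmap(unsigned long vaddr)
{
        /* What __pa() handles cheaply: addresses inside the direct map. */
        return vaddr - DIRECT_MAP_BASE;
}

static unsigned long pa_symbol_style(unsigned long vaddr, unsigned long phys_base)
{
        /* What __pa_symbol() computes: kernel-image addresses, relocated by phys_base. */
        return vaddr - KERNEL_IMAGE_BASE + phys_base;
}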
@@ -276,8 +276,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
-       if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
-                  __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
+       if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+                  __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;
 
 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
@@ -363,6 +363,37 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 }
 EXPORT_SYMBOL_GPL(lookup_address);
 
+/*
+ * This is necessary because __pa() does not work on some
+ * kinds of memory, like vmalloc() or the alloc_remap()
+ * areas on 32-bit NUMA systems.  The percpu areas can
+ * end up in this kind of memory, for instance.
+ *
+ * This could be optimized, but it is only intended to be
+ * used at initialization time, and keeping it
+ * unoptimized should increase the testing coverage for
+ * the more obscure platforms.
+ */
+phys_addr_t slow_virt_to_phys(void *__virt_addr)
+{
+       unsigned long virt_addr = (unsigned long)__virt_addr;
+       phys_addr_t phys_addr;
+       unsigned long offset;
+       enum pg_level level;
+       unsigned long psize;
+       unsigned long pmask;
+       pte_t *pte;
+
+       pte = lookup_address(virt_addr, &level);
+       BUG_ON(!pte);
+       psize = page_level_size(level);
+       pmask = page_level_mask(level);
+       offset = virt_addr & ~pmask;
+       phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+       return (phys_addr | offset);
+}
+EXPORT_SYMBOL_GPL(slow_virt_to_phys);
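
The new helper walks the init_mm page tables via lookup_address(), so it works for any mapped kernel virtual address, including the vmalloc()/alloc_remap() areas where __pa() is invalid. A minimal usage sketch; the per-cpu structure and wrapper below are hypothetical, not part of this patch:

/* Hypothetical caller; needs <linux/percpu.h>. */
struct my_buf {
        char data[64];
};

static DEFINE_PER_CPU(struct my_buf, my_percpu_buf);

static phys_addr_t buf_phys_for_cpu(int cpu)
{
        void *vaddr = per_cpu_ptr(&my_percpu_buf, cpu);

        /* Valid even if the per-cpu area landed outside the direct map. */
        return slow_virt_to_phys(vaddr);
}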
+
 /*
  * Set the new pmd in all the pgds we know about:
  */
@@ -396,7 +427,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
-       unsigned int level;
+       enum pg_level level;
 
        if (cpa->force_split)
                return 1;
@@ -412,15 +443,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
        switch (level) {
        case PG_LEVEL_2M:
-               psize = PMD_PAGE_SIZE;
-               pmask = PMD_PAGE_MASK;
-               break;
 #ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
-               psize = PUD_PAGE_SIZE;
-               pmask = PUD_PAGE_MASK;
-               break;
 #endif
+               psize = page_level_size(level);
+               pmask = page_level_mask(level);
+               break;
        default:
                do_split = -EINVAL;
                goto out_unlock;
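
The collapsed switch above leans on the new page_level_size()/page_level_mask() helpers. Roughly, they derive the size and mask from the level's page shift, along these lines (a sketch of the semantics, not the exact definitions in pgtable_types.h):

static inline unsigned long example_level_shift(enum pg_level level)
{
        /* PG_LEVEL_4K -> 12, PG_LEVEL_2M -> 21, PG_LEVEL_1G -> 30 on x86_64 */
        switch (level) {
        case PG_LEVEL_2M: return PMD_SHIFT;
        case PG_LEVEL_1G: return PUD_SHIFT;
        default:          return PAGE_SHIFT;
        }
}

static inline unsigned long example_level_size(enum pg_level level)
{
        return 1UL << example_level_shift(level);
}

static inline unsigned long example_level_mask(enum pg_level level)
{
        return ~(example_level_size(level) - 1);
}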
@@ -514,21 +542,13 @@ out_unlock:
        return do_split;
 }
 
-static int split_large_page(pte_t *kpte, unsigned long address)
+int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase)
 {
        unsigned long pfn, pfninc = 1;
        unsigned int i, level;
-       pte_t *pbase, *tmp;
+       pte_t *tmp;
        pgprot_t ref_prot;
-       struct page *base;
-
-       if (!debug_pagealloc)
-               spin_unlock(&cpa_lock);
-       base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
-       if (!debug_pagealloc)
-               spin_lock(&cpa_lock);
-       if (!base)
-               return -ENOMEM;
+       struct page *base = virt_to_page(pbase);
 
        spin_lock(&pgd_lock);
        /*
@@ -536,10 +556,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
         * up for us already:
         */
        tmp = lookup_address(address, &level);
-       if (tmp != kpte)
-               goto out_unlock;
+       if (tmp != kpte) {
+               spin_unlock(&pgd_lock);
+               return 1;
+       }
 
-       pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        /*
@@ -583,16 +604,10 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));
 
-       if (address >= (unsigned long)__va(0) &&
-               address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
+       if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
+                               PFN_DOWN(__pa(address)) + 1))
                split_page_count(level);
 
-#ifdef CONFIG_X86_64
-       if (address >= (unsigned long)__va(1UL<<32) &&
-               address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
-               split_page_count(level);
-#endif
-
        /*
         * Install the new, split up pagetable.
         *
@@ -611,17 +626,27 @@ static int split_large_page(pte_t *kpte, unsigned long address)
         * going on.
         */
        __flush_tlb_all();
+       spin_unlock(&pgd_lock);
+
+       return 0;
+}
 
-       base = NULL;
+static int split_large_page(pte_t *kpte, unsigned long address)
+{
+       pte_t *pbase;
+       struct page *base;
 
-out_unlock:
-       /*
-        * If we dropped out via the lookup_address check under
-        * pgd_lock then stick the page back into the pool:
-        */
-       if (base)
+       if (!debug_pagealloc)
+               spin_unlock(&cpa_lock);
+       base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
+       if (!debug_pagealloc)
+               spin_lock(&cpa_lock);
+       if (!base)
+               return -ENOMEM;
+
+       pbase = (pte_t *)page_address(base);
+       if (__split_large_page(kpte, address, pbase))
                __free_page(base);
-       spin_unlock(&pgd_lock);
 
        return 0;
 }
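
Splitting the allocation out of __split_large_page() lets a caller provide its own pte page and perform the split under its own constraints, with split_large_page() reduced to the allocate-then-split wrapper shown above. A hypothetical external caller might look like this (names and error handling are illustrative):

static int split_one_mapping(unsigned long addr)
{
        unsigned int level;
        pte_t *kpte = lookup_address(addr, &level);
        struct page *base;

        if (!kpte || level == PG_LEVEL_4K)
                return 0;                       /* nothing to split */

        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!base)
                return -ENOMEM;

        /* Nonzero return means someone else already split it; give the page back. */
        if (__split_large_page(kpte, addr, (pte_t *)page_address(base)))
                __free_page(base);

        return 0;
}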
@@ -773,13 +798,9 @@ static int cpa_process_alias(struct cpa_data *cpa)
        unsigned long vaddr;
        int ret;
 
-       if (cpa->pfn >= max_pfn_mapped)
+       if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
                return 0;
 
-#ifdef CONFIG_X86_64
-       if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
-               return 0;
-#endif
        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
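
The open-coded max_pfn_mapped / max_low_pfn_mapped checks here (and in __split_large_page() above) are replaced by pfn_range_is_mapped(), which consults the direct-mapped pfn ranges recorded at boot. A sketch of its semantics, assuming an illustrative range table; the real helper lives in arch/x86/mm/init.c and may differ in detail:

struct mapped_range { unsigned long start, end; };      /* illustrative */
static struct mapped_range example_mapped[4];           /* filled at boot */
static int example_nr_mapped;

static bool example_pfn_range_is_mapped(unsigned long start_pfn,
                                        unsigned long end_pfn)
{
        int i;

        /* True only if the whole [start_pfn, end_pfn) range is direct-mapped. */
        for (i = 0; i < example_nr_mapped; i++)
                if (start_pfn >= example_mapped[i].start &&
                    end_pfn <= example_mapped[i].end)
                        return true;
        return false;
}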