x86: allocate and initialize unshared pmds
[cascardo/linux.git] arch/x86/mm/pgtable_32.c
index ef1f6cd..3a6c920 100644
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/highmem.h>
@@ -39,6 +40,8 @@ void show_mem(void)
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
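+                       /*
+                        * This walks every page in the node; kick the NMI
+                        * watchdog once per MAX_ORDER block so a large node
+                        * doesn't trigger a false lockup report.
+                        */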
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+                               touch_nmi_watchdog();
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
@@ -97,8 +100,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (pgprot_val(flags))
-               /* <pfn,flags> stored as-is, to permit clearing entries */
-               set_pte(pte, pfn_pte(pfn, flags));
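+               /*
+                * set_pte_present() also passes the mm and address down,
+                * which a paravirt backend may need when a present
+                * kernel mapping is (re)established.
+                */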
+               set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
        else
                pte_clear(&init_mm, vaddr, pte);
 
@@ -292,6 +294,79 @@ static void pgd_dtor(void *pgd)
 #define UNSHARED_PTRS_PER_PGD                          \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
 
+#ifdef CONFIG_X86_PAE
+/*
+ * Mop up any pmd pages which may still be attached to the pgd.
+ * Normally they will be freed by munmap/exit_mmap, but any pmd we
+ * preallocate which never got a corresponding vma will need to be
+ * freed manually.
+ */
+static void pgd_mop_up_pmds(pgd_t *pgdp)
+{
+       int i;
+
+       for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
+               pgd_t pgd = pgdp[i];
+
+               if (pgd_val(pgd) != 0) {
+                       pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
+
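+                       /*
+                        * Clear the pgd entry before releasing the pmd,
+                        * so the page is never reachable once freed.
+                        */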
+                       pgdp[i] = native_make_pgd(0);
+
+                       paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
+                       pmd_free(pmd);
+               }
+       }
+}
+
+/*
+ * In PAE mode, we need to do a cr3 reload (=tlb flush) when
+ * updating the top-level pagetable entries to guarantee the
+ * processor notices the update.  Since this is expensive, and
+ * all 4 top-level entries are used almost immediately in a
+ * new process's life, we just pre-populate them here.
+ *
+ * Also, if we're in a paravirt environment where the kernel pmd is
+ * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
+ * and initialize the kernel pmds here.
+ */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+       pud_t *pud;
+       unsigned long addr;
+       int i;
+
+       pud = pud_offset(pgd, 0);
+       for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
+            i++, pud++, addr += PUD_SIZE) {
+               pmd_t *pmd = pmd_alloc_one(mm, addr);
+
+               if (!pmd) {
+                       pgd_mop_up_pmds(pgd);
+                       return 0;
+               }
+
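+               /*
+                * Entries at or above USER_PTRS_PER_PGD map kernel
+                * space; copy those pmds from swapper_pg_dir, since
+                * they are not shared in this configuration.
+                */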
+               if (i >= USER_PTRS_PER_PGD)
+                       memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
+                              sizeof(pmd_t) * PTRS_PER_PMD);
+
+               pud_populate(mm, pud, pmd);
+       }
+
+       return 1;
+}
+#else  /* !CONFIG_X86_PAE */
+/* No need to prepopulate any pagetable entries in non-PAE modes. */
+static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
+{
+       return 1;
+}
+
+static void pgd_mop_up_pmds(pgd_t *pgd)
+{
+}
+#endif /* CONFIG_X86_PAE */
+
 /* If we allocate a pmd for part of the kernel address space, then
    make sure its initialized with the appropriate kernel mappings.
    Otherwise use a cached zeroed pmd.  */
@@ -328,15 +403,22 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;
 
+       mm->pgd = pgd;          /* so that alloc_pd can use it */
+
        for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = pmd_cache_alloc(i);
 
                if (!pmd)
                        goto out_oom;
 
-               paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
+               paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
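+               /*
+                * Passing the mm (mm->pgd was set above for this) lets a
+                * paravirt backend tie the new pmd to this pagetable;
+                * the low bit of the pgd value below is _PAGE_PRESENT.
+                */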
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
+       if (!pgd_prepopulate_pmd(mm, pgd)) {
+               quicklist_free(0, pgd_dtor, pgd);
+               pgd = NULL;
+       }
+
        return pgd;
 
 out_oom:
@@ -363,6 +445,7 @@ void pgd_free(pgd_t *pgd)
                        pmd_cache_free(pmd, i);
                }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
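+       /* Free any preallocated pmds that never ended up under a vma. */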
+       pgd_mop_up_pmds(pgd);
        quicklist_free(0, pgd_dtor, pgd);
 }
 
@@ -370,4 +453,3 @@ void check_pgt_cache(void)
 {
        quicklist_trim(0, pgd_dtor, 25, 16);
 }
-