arm64: Align less than PAGE_SIZE pgds naturally
author: Catalin Marinas <catalin.marinas@arm.com>
Fri, 10 Oct 2014 14:37:28 +0000 (15:37 +0100)
committer: Catalin Marinas <catalin.marinas@arm.com>
Mon, 20 Oct 2014 16:47:02 +0000 (17:47 +0100)
When the pgd size is smaller than PAGE_SIZE, pgd_alloc() uses kzalloc()
to save space. However, this is not always naturally aligned as required
by the architecture. This patch creates a kmem_cache for pgd allocations
with the correct alignment.

The current kernel configurations with 4K pages + 39-bit VA and 64K
pages + 42-bit VA use a full page for the pgd and are not affected. The
patch is required for 48-bit VA with 64K pages where the pgd is 512
bytes.

Reported-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/mm/pgd.c

index 62c6101..6682b36 100644 (file)
 
/* Size in bytes of the top-level page table: one pgd_t per PGD entry. */
#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

/*
 * Slab cache used when PGD_SIZE < PAGE_SIZE; created in pgd_cache_init()
 * with align == size so pgds are naturally aligned as the architecture
 * requires. Remains NULL when a full page is used per pgd.
 */
static struct kmem_cache *pgd_cache;
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        if (PGD_SIZE == PAGE_SIZE)
                return (pgd_t *)get_zeroed_page(GFP_KERNEL);
        else
-               return kzalloc(PGD_SIZE, GFP_KERNEL);
+               return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
 }
 
/*
 * Release a pgd previously obtained from pgd_alloc(), returning it to
 * whichever allocator it came from (page allocator vs. pgd_cache —
 * mirrors the size check in pgd_alloc()).
 */
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (PGD_SIZE == PAGE_SIZE) {
		free_page((unsigned long)pgd);
		return;
	}

	kmem_cache_free(pgd_cache, pgd);
}
+
+static int __init pgd_cache_init(void)
+{
+       /*
+        * Naturally aligned pgds required by the architecture.
+        */
+       if (PGD_SIZE != PAGE_SIZE)
+               pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
+                                             SLAB_PANIC, NULL);
+       return 0;
 }
+core_initcall(pgd_cache_init);