mm: properly separate the bloated ptl from the regular case
author	Peter Zijlstra <peterz@infradead.org>
Thu, 14 Nov 2013 22:31:52 +0000 (14:31 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Nov 2013 00:32:20 +0000 (09:32 +0900)
Use kernel/bounds.c to convert the build-time spinlock_t size check into a
preprocessor symbol (BLOATED_SPINLOCKS) and apply it to properly separate
the two page::ptl cases: a spinlock_t embedded directly in struct page when
the lock does not outgrow an int, and a pointer to a dynamically allocated
spinlock_t when it does.
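
For reference, kernel/bounds.c is only ever compiled to assembly; each
DEFINE() plants a marker in the generated .s file that Kbuild scrapes back
out into include/generated/bounds.h, which is what makes the result of a
sizeof() comparison visible to the preprocessor.  A simplified sketch of the
DEFINE() macro from <linux/kbuild.h> (the exact asm template varies by
kernel version):

	/*
	 * Emit "->SYM <value>" into the generated assembly; Kbuild's sed
	 * script rewrites each marker as "#define SYM <value>" in bounds.h.
	 */
	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))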

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm.h
include/linux/mm_types.h
kernel/bounds.c
mm/memory.c

diff --git a/include/linux/mm.h b/include/linux/mm.h
index d033974..1cedd00 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,27 +1317,29 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
 
 #if USE_SPLIT_PTE_PTLOCKS
-bool __ptlock_alloc(struct page *page);
-void __ptlock_free(struct page *page);
+#if BLOATED_SPINLOCKS
+extern bool ptlock_alloc(struct page *page);
+extern void ptlock_free(struct page *page);
+
+static inline spinlock_t *ptlock_ptr(struct page *page)
+{
+       return page->ptl;
+}
+#else /* BLOATED_SPINLOCKS */
 static inline bool ptlock_alloc(struct page *page)
 {
-       if (sizeof(spinlock_t) > sizeof(page->ptl))
-               return __ptlock_alloc(page);
        return true;
 }
+
 static inline void ptlock_free(struct page *page)
 {
-       if (sizeof(spinlock_t) > sizeof(page->ptl))
-               __ptlock_free(page);
 }
 
 static inline spinlock_t *ptlock_ptr(struct page *page)
 {
-       if (sizeof(spinlock_t) > sizeof(page->ptl))
-               return (spinlock_t *) page->ptl;
-       else
-               return (spinlock_t *) &page->ptl;
+       return &page->ptl;
 }
+#endif /* BLOATED_SPINLOCKS */
 
 static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
 {
@@ -1354,7 +1356,7 @@ static inline bool ptlock_init(struct page *page)
         * slab code uses page->slab_cache and page->first_page (for tail
         * pages), which share storage with page->ptl.
         */
-       VM_BUG_ON(page->ptl);
+       VM_BUG_ON(*(unsigned long *)&page->ptl);
        if (!ptlock_alloc(page))
                return false;
        spin_lock_init(ptlock_ptr(page));
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 423da79..10f5a72 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -147,10 +147,11 @@ struct page {
                                                 * system if PG_buddy is set.
                                                 */
 #if USE_SPLIT_PTE_PTLOCKS
-               unsigned long ptl; /* It's spinlock_t if it fits to long,
-                                   * otherwise it's pointer to dynamicaly
-                                   * allocated spinlock_t.
-                                   */
+#if BLOATED_SPINLOCKS
+               spinlock_t *ptl;
+#else
+               spinlock_t ptl;
+#endif
 #endif
                struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
                struct page *first_page;        /* Compound tail pages */
diff --git a/kernel/bounds.c b/kernel/bounds.c
index e8ca97b..578782e 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -11,6 +11,7 @@
 #include <linux/kbuild.h>
 #include <linux/page_cgroup.h>
 #include <linux/log2.h>
+#include <linux/spinlock.h>
 
 void foo(void)
 {
@@ -21,5 +22,6 @@ void foo(void)
 #ifdef CONFIG_SMP
        DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
 #endif
+       DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
        /* End of constants */
 }
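
With the hunk above in place, a configuration whose spinlock_t outgrows an
int (e.g. CONFIG_DEBUG_SPINLOCK or lockdep) should end up with something
like the following in include/generated/bounds.h (the exact formatting of
the generated line is a guess):

	#define BLOATED_SPINLOCKS 1 /* sizeof(spinlock_t) > sizeof(int) */

and 0 on a lean configuration, so the #if BLOATED_SPINLOCKS blocks above
resolve at preprocessing time.
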
diff --git a/mm/memory.c b/mm/memory.c
index 24ffae2..5d9025f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4271,21 +4271,20 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
-#if USE_SPLIT_PTE_PTLOCKS
-bool __ptlock_alloc(struct page *page)
+#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+bool ptlock_alloc(struct page *page)
 {
        spinlock_t *ptl;
 
        ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
        if (!ptl)
                return false;
-       page->ptl = (unsigned long)ptl;
+       page->ptl = ptl;
        return true;
 }
 
-void __ptlock_free(struct page *page)
+void ptlock_free(struct page *page)
 {
-       if (sizeof(spinlock_t) > sizeof(page->ptl))
-               kfree((spinlock_t *)page->ptl);
+       kfree(page->ptl);
 }
 #endif
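
Callers are unaffected by which representation was chosen: they reach the
lock through pte_lockptr(), typically via the pte_offset_map_lock() helper.
A minimal usage sketch (mm, pmd and address assumed to be in scope):

	spinlock_t *ptl;
	pte_t *pte;

	/* Map the PTE page and take its split lock (via pte_lockptr()). */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	/* ... read or update *pte under the lock ... */
	pte_unmap_unlock(pte, ptl);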