khugepaged: move up_read(mmap_sem) out of khugepaged_alloc_page()
Author:     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
AuthorDate: Tue, 26 Jul 2016 22:26:26 +0000 (15:26 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 26 Jul 2016 23:19:19 +0000 (16:19 -0700)
Both variants of khugepaged_alloc_page() do up_read(&mm->mmap_sem)
first: there is no point in keeping it inside the function.

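A userspace sketch of the same locking pattern, using pthread rwlocks
(all names below are illustrative stand-ins, not kernel API): the
caller drops the read lock before a potentially slow allocation, then
retakes the lock and revalidates the state it had looked up, which is
exactly what collapse_huge_page() now does around the allocation.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Hypothetical stand-ins for the slow allocation and the recheck. */
    static void *slow_alloc(void) { return malloc(4096); }
    static bool still_valid(void) { return true; }

    static void *collapse_like_path(void)
    {
        void *page;

        pthread_rwlock_rdlock(&map_lock);
        /* ... lookups done under the read lock ... */
        pthread_rwlock_unlock(&map_lock);  /* caller drops the lock ... */

        page = slow_alloc();               /* ... before the slow part */
        if (!page)
            return NULL;

        pthread_rwlock_wrlock(&map_lock);  /* retake in write mode */
        if (!still_valid()) {              /* revalidate what the read
                                              lock had covered */
            pthread_rwlock_unlock(&map_lock);
            free(page);
            return NULL;
        }
        /* ... install page under the write lock ... */
        pthread_rwlock_unlock(&map_lock);
        return page;
    }

After the change, khugepaged_alloc_page() only allocates; the caller
owns the lock protocol, and the NUMA and !NUMA variants take the same
arguments.
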
Link: http://lkml.kernel.org/r/1466021202-61880-33-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/khugepaged.c

index 3e6d1a1..639047c 100644
@@ -739,19 +739,10 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-                      unsigned long address, int node)
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
        VM_BUG_ON_PAGE(*hpage, *hpage);
 
-       /*
-        * Before allocating the hugepage, release the mmap_sem read lock.
-        * The allocation can take potentially a long time if it involves
-        * sync compaction, and we do not need to hold the mmap_sem during
-        * that. We will recheck the vma after taking it again in write mode.
-        */
-       up_read(&mm->mmap_sem);
-
        *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
        if (unlikely(!*hpage)) {
                count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
@@ -812,10 +803,8 @@ static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
 }
 
 static struct page *
-khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
-                      unsigned long address, int node)
+khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
 {
-       up_read(&mm->mmap_sem);
        VM_BUG_ON(!*hpage);
 
        return  *hpage;
@@ -936,8 +925,14 @@ static void collapse_huge_page(struct mm_struct *mm,
        /* Only allocate from the target node */
        gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
 
-       /* release the mmap_sem read lock. */
-       new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
+       /*
+        * Before allocating the hugepage, release the mmap_sem read lock.
+        * The allocation can take potentially a long time if it involves
+        * sync compaction, and we do not need to hold the mmap_sem during
+        * that. We will recheck the vma after taking it again in write mode.
+        */
+       up_read(&mm->mmap_sem);
+       new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
                result = SCAN_ALLOC_HUGE_PAGE_FAIL;
                goto out_nolock;