mm/hugetlb: handle races in alloc_huge_page and hugetlb_reserve_pages
Author:     Mike Kravetz <mike.kravetz@oracle.com>
AuthorDate: Wed, 24 Jun 2015 23:57:58 +0000 (16:57 -0700)
Committer:  Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Thu, 25 Jun 2015 00:49:44 +0000 (17:49 -0700)
alloc_huge_page and hugetlb_reserve_pages use region_chg to calculate the
number of pages which will be added to the reserve map.  Subpool and
global reserve counts are adjusted based on the output of region_chg.
However, before the pages are actually added to the reserve map, these
routines can race with each other and end up adding fewer pages than
region_chg predicted.  When this happens, the subpool and global reserve
counts are left higher than they should be.
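As a hypothetical interleaving (task roles are illustrative only), the
race looks like this:

    Task A: hugetlb_reserve_pages()      Task B: alloc_huge_page()
    chg = region_chg(...)  /* N */
    charge subpool/reserve for N
                                         vma_needs_reservation() /* 1 */
                                         charge subpool/reserve for 1
                                         vma_commit_reservation()
                                         /* page now in reserve map */
    region_add(...)  /* adds only N - 1 */
    /* subpool/reserve over-charged by 1 */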

Compare the number of pages actually added (the return value of
region_add) to the number expected to be added (the return value of
region_chg).  If fewer pages were actually added, a race occurred;
adjust the subpool and global reserve counts by the difference.
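In outline, both paths apply the same pattern (a minimal sketch using
the names from the hunks below, not a complete function):

    chg = region_chg(resv_map, from, to);  /* pages we expect to add */
    /* ... subpool and global reserve counts charged based on chg ... */
    add = region_add(resv_map, from, to);  /* pages actually added */
    if (unlikely(chg > add)) {
            /*
             * A racing thread already reserved (chg - add) of these
             * pages; return the excess to the subpool and drop the
             * corresponding global reserve count.
             */
            long rsv_adjust;

            rsv_adjust = hugepage_subpool_put_pages(spool, chg - add);
            hugetlb_acct_memory(h, -rsv_adjust);
    }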

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: David Rientjes <rientjes@google.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/hugetlb.c

index cd3fc41..75c0eef 100644
@@ -1542,7 +1542,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
-       long chg;
+       long chg, commit;
        int ret, idx;
        struct hugetlb_cgroup *h_cg;
 
@@ -1583,7 +1583,22 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        set_page_private(page, (unsigned long)spool);
 
-       vma_commit_reservation(h, vma, addr);
+       commit = vma_commit_reservation(h, vma, addr);
+       if (unlikely(chg > commit)) {
+               /*
+                * The page was added to the reservation map between
+                * vma_needs_reservation and vma_commit_reservation.
+                * This indicates a race with hugetlb_reserve_pages.
+                * Adjust for the subpool count incremented above AND
+                * in hugetlb_reserve_pages for the same page.  Also,
+                * the reservation count added in hugetlb_reserve_pages
+                * no longer applies.
+                */
+               long rsv_adjust;
+
+               rsv_adjust = hugepage_subpool_put_pages(spool, 1);
+               hugetlb_acct_memory(h, -rsv_adjust);
+       }
        return page;
 
 out_uncharge_cgroup:
@@ -3701,8 +3716,24 @@ int hugetlb_reserve_pages(struct inode *inode,
         * consumed reservations are stored in the map. Hence, nothing
         * else has to be done for private mappings here
         */
-       if (!vma || vma->vm_flags & VM_MAYSHARE)
-               region_add(resv_map, from, to);
+       if (!vma || vma->vm_flags & VM_MAYSHARE) {
+               long add = region_add(resv_map, from, to);
+
+               if (unlikely(chg > add)) {
+                       /*
+                        * pages in this range were added to the reserve
+                        * map between region_chg and region_add.  This
+                        * indicates a race with alloc_huge_page.  Adjust
+                        * the subpool and reserve counts modified above
+                        * based on the difference.
+                        */
+                       long rsv_adjust;
+
+                       rsv_adjust = hugepage_subpool_put_pages(spool,
+                                                               chg - add);
+                       hugetlb_acct_memory(h, -rsv_adjust);
+               }
+       }
        return 0;
 out_err:
        if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))