mm: remove unnecessary condition in remove_inode_hugepages
author     zhong jiang <zhongjiang@huawei.com>
           Sat, 8 Oct 2016 00:02:01 +0000 (17:02 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 8 Oct 2016 01:46:29 +0000 (18:46 -0700)
When a huge page is added to the page cache (huge_add_to_page_cache),
the page private flag is cleared.  Since this code
(remove_inode_hugepages) is only called for pages in the page cache,
PagePrivate(page) will always be false.

The patch removes the code without any functional change.
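
For context, huge_add_to_page_cache() in mm/hugetlb.c clears the flag as
soon as the page has been inserted into the cache.  A trimmed sketch of the
function as it looks around this series (not a verbatim quote):

    int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                               pgoff_t idx)
    {
            struct inode *inode = mapping->host;
            struct hstate *h = hstate_inode(inode);
            int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

            if (err)
                    return err;
            /*
             * The page is now in the page cache, so its reservation has been
             * consumed and the "restore reserve on error" marker is stale.
             */
            ClearPagePrivate(page);

            spin_lock(&inode->i_lock);
            inode->i_blocks += blocks_per_huge_page(h);
            spin_unlock(&inode->i_lock);
            return 0;
    }

Every page remove_inode_hugepages() finds in the page cache has gone through
this path, hence the VM_BUG_ON() below.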

Link: http://lkml.kernel.org/r/1475113323-29368-1-git-send-email-zhongjiang@huawei.com
Signed-off-by: zhong jiang <zhongjiang@huawei.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Tested-by: Mike Kravetz <mike.kravetz@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/hugetlbfs/inode.c
include/linux/hugetlb.h
mm/hugetlb.c

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 4ea71eb..7337cac 100644
@@ -416,7 +416,6 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];
-                       bool rsv_on_error;
                        u32 hash;
 
                        /*
@@ -458,18 +457,17 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                         * cache (remove_huge_page) BEFORE removing the
                         * region/reserve map (hugetlb_unreserve_pages).  In
                         * rare out of memory conditions, removal of the
-                        * region/reserve map could fail.  Before free'ing
-                        * the page, note PagePrivate which is used in case
-                        * of error.
+                        * region/reserve map could fail. Correspondingly,
+                        * the subpool and global reserve usage count can need
+                        * to be adjusted.
                         */
-                       rsv_on_error = !PagePrivate(page);
+                       VM_BUG_ON(PagePrivate(page));
                        remove_huge_page(page);
                        freed++;
                        if (!truncate_op) {
                                if (unlikely(hugetlb_unreserve_pages(inode,
                                                        next, next + 1, 1)))
-                                       hugetlb_fix_reserve_counts(inode,
-                                                               rsv_on_error);
+                                       hugetlb_fix_reserve_counts(inode);
                        }
 
                        unlock_page(page);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index fe99e6f..48c76d6 100644
@@ -90,7 +90,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 void free_huge_page(struct page *page);
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
+void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
                                struct vm_area_struct *vma,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e4a4500..ec49d9e 100644
@@ -567,13 +567,13 @@ retry:
  * appear as a "reserved" entry instead of simply dangling with incorrect
  * counts.
  */
-void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
+void hugetlb_fix_reserve_counts(struct inode *inode)
 {
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;
 
        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
-       if (restore_reserve && rsv_adjust) {
+       if (rsv_adjust) {
                struct hstate *h = hstate_inode(inode);
 
                hugetlb_acct_memory(h, 1);