diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cc2a99e..87e11d8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1448,6 +1448,7 @@ static void dissolve_free_huge_page(struct page *page)
                list_del(&page->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
+               h->max_huge_pages--;
                update_and_free_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
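
For context: dissolve_free_huge_page() runs when memory is offlined, and without the extra decrement the pool would keep claiming a page that no longer backs it. A simplified sketch of the whole function after this hunk, reconstructed from the context lines above plus the usual surrounding checks in mainline of this era (treat the outer lines as an assumption, not part of the patch):

static void dissolve_free_huge_page(struct page *page)
{
	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct hstate *h = page_hstate(page);
		int nid = page_to_nid(page);

		list_del(&page->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		/* keep max_huge_pages in step with the pages actually backing the pool */
		h->max_huge_pages--;
		update_and_free_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}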
@@ -2216,6 +2217,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
                 * and reducing the surplus.
                 */
                spin_unlock(&hugetlb_lock);
+
+               /* yield cpu to avoid soft lockup */
+               cond_resched();
+
                if (hstate_is_gigantic(h))
                        ret = alloc_fresh_gigantic_page(h, nodes_allowed);
                else
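
The cond_resched() lands in the loop of set_max_huge_pages() that grows the persistent pool; hugetlb_lock is already dropped around each allocation, so yielding there is safe. A hedged sketch of that loop as it looks in mainline of this era (the full comment wording and the out label are reconstructed from context, so treat them as assumptions):

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		if (hstate_is_gigantic(h))
			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
		else
			ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}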
@@ -3179,7 +3184,6 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page)
 {
-       int force_flush = 0;
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *ptep;
@@ -3198,19 +3202,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        tlb_start_vma(tlb, vma);
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        address = start;
-again:
        for (; address < end; address += sz) {
                ptep = huge_pte_offset(mm, address);
                if (!ptep)
                        continue;
 
                ptl = huge_pte_lock(h, mm, ptep);
-               if (huge_pmd_unshare(mm, &address, ptep))
-                       goto unlock;
+               if (huge_pmd_unshare(mm, &address, ptep)) {
+                       spin_unlock(ptl);
+                       continue;
+               }
 
                pte = huge_ptep_get(ptep);
-               if (huge_pte_none(pte))
-                       goto unlock;
+               if (huge_pte_none(pte)) {
+                       spin_unlock(ptl);
+                       continue;
+               }
 
                /*
                 * Migrating hugepage or HWPoisoned hugepage is already
@@ -3218,7 +3225,8 @@ again:
                 */
                if (unlikely(!pte_present(pte))) {
                        huge_pte_clear(mm, address, ptep);
-                       goto unlock;
+                       spin_unlock(ptl);
+                       continue;
                }
 
                page = pte_page(pte);
@@ -3228,9 +3236,10 @@ again:
                 * are about to unmap is the actual page of interest.
                 */
                if (ref_page) {
-                       if (page != ref_page)
-                               goto unlock;
-
+                       if (page != ref_page) {
+                               spin_unlock(ptl);
+                               continue;
+                       }
                        /*
                         * Mark the VMA as having unmapped its page so that
                         * future faults in this VMA will fail rather than
@@ -3246,30 +3255,14 @@ again:
 
                hugetlb_count_sub(pages_per_huge_page(h), mm);
                page_remove_rmap(page, true);
-               force_flush = !__tlb_remove_page(tlb, page);
-               if (force_flush) {
-                       address += sz;
-                       spin_unlock(ptl);
-                       break;
-               }
-               /* Bail out after unmapping reference page if supplied */
-               if (ref_page) {
-                       spin_unlock(ptl);
-                       break;
-               }
-unlock:
+
                spin_unlock(ptl);
-       }
-       /*
-        * mmu_gather ran out of room to batch pages, we break out of
-        * the PTE lock to avoid doing the potential expensive TLB invalidate
-        * and page-free while holding it.
-        */
-       if (force_flush) {
-               force_flush = 0;
-               tlb_flush_mmu(tlb);
-               if (address < end && !ref_page)
-                       goto again;
+               tlb_remove_page_size(tlb, page, huge_page_size(h));
+               /*
+                * Bail out after unmapping reference page if supplied
+                */
+               if (ref_page)
+                       break;
        }
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
        tlb_end_vma(tlb, vma);
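
The net effect of this hunk is that the manual force_flush bookkeeping and the again: retry are gone: every iteration now unlocks the PTL and hands the page to the mmu_gather layer, which batches the TLB flush and the page free itself. A minimal sketch of the per-page tail of the loop after the change, with comments added for what it relies on (the pte clearing and dirty-transfer lines above it are unchanged context and not repeated here):

		hugetlb_count_sub(pages_per_huge_page(h), mm);
		page_remove_rmap(page, true);

		spin_unlock(ptl);
		/*
		 * mmu_gather queues the page and performs the TLB flush and
		 * the actual free on its own once its batch fills up, so the
		 * caller no longer needs the force_flush / goto again dance.
		 */
		tlb_remove_page_size(tlb, page, huge_page_size(h));
		/* Bail out after unmapping reference page if supplied */
		if (ref_page)
			break;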
@@ -3328,7 +3321,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
        address = address & huge_page_mask(h);
        pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
-       mapping = file_inode(vma->vm_file)->i_mapping;
+       mapping = vma->vm_file->f_mapping;
 
        /*
         * Take the mapping lock for the duration of the table walk. As
@@ -3950,6 +3943,14 @@ same_page:
        return i ? i : -EFAULT;
 }
 
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)        flush_tlb_range(vma, addr, end)
+#endif
+
 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
 {
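
An architecture with special requirements for invalidating hugetlb-backed TLB entries can override the fallback above by defining __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE and providing its own flush_hugetlb_tlb_range() in asm/hugetlb.h. A purely illustrative sketch of such an override (no particular architecture implied):

/* arch/<arch>/include/asm/hugetlb.h -- illustrative only */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	/*
	 * A real implementation could round to huge page boundaries or use
	 * a dedicated huge-TLB invalidate; this sketch just falls back to
	 * the generic range flush.
	 */
	flush_tlb_range(vma, start, end);
}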
@@ -4010,7 +4011,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
         * once we release i_mmap_rwsem, another task can do the final put_page
         * and that page table be reused and filled with junk.
         */
-       flush_tlb_range(vma, start, end);
+       flush_hugetlb_tlb_range(vma, start, end);
        mmu_notifier_invalidate_range(mm, start, end);
        i_mmap_unlock_write(vma->vm_file->f_mapping);
        mmu_notifier_invalidate_range_end(mm, start, end);
@@ -4318,7 +4319,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
                                pte = (pte_t *)pmd_alloc(mm, pud, addr);
                }
        }
-       BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+       BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
 
        return pte;
 }
@@ -4403,7 +4404,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
 
 /*
  * This function is called from memory failure code.
- * Assume the caller holds page lock of the head page.
  */
 int dequeue_hwpoisoned_huge_page(struct page *hpage)
 {