diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b49ee12..9ed5853 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -89,6 +89,7 @@ static unsigned int khugepaged_full_scans;
 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
 /* during fragmentation poll the hugepage allocator once every minute */
 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
+static unsigned long khugepaged_sleep_expire;
 static struct task_struct *khugepaged_thread __read_mostly;
 static DEFINE_MUTEX(khugepaged_mutex);
 static DEFINE_SPINLOCK(khugepaged_mm_lock);
@@ -467,6 +468,7 @@ static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
                return -EINVAL;
 
        khugepaged_scan_sleep_millisecs = msecs;
+       khugepaged_sleep_expire = 0;
        wake_up_interruptible(&khugepaged_wait);
 
        return count;
@@ -494,6 +496,7 @@ static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
                return -EINVAL;
 
        khugepaged_alloc_sleep_millisecs = msecs;
+       khugepaged_sleep_expire = 0;
        wake_up_interruptible(&khugepaged_wait);
 
        return count;
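
Both store handlers above now zero khugepaged_sleep_expire before waking khugepaged, so a freshly written interval takes effect at the daemon's next wakeup check instead of only after the previously armed timeout runs out. The knobs live under /sys/kernel/mm/transparent_hugepage/khugepaged/; a minimal userspace sketch of the write that reaches scan_sleep_millisecs_store() (the 100 ms value is arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Writing a new interval invokes scan_sleep_millisecs_store(),
	 * which now also zeroes khugepaged_sleep_expire so khugepaged
	 * picks up the value immediately rather than finishing the
	 * previously armed sleep first. */
	const char *path =
		"/sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "100", strlen("100")) < 0)
		perror("write");
	close(fd);
	return 0;
}
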
@@ -764,10 +767,7 @@ pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
 
 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
 {
-       pmd_t entry;
-       entry = mk_pmd(page, prot);
-       entry = pmd_mkhuge(entry);
-       return entry;
+       return pmd_mkhuge(mk_pmd(page, prot));
 }
 
 static inline struct list_head *page_deferred_list(struct page *page)
@@ -1013,6 +1013,7 @@ int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
        insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
        return VM_FAULT_NOPAGE;
 }
+EXPORT_SYMBOL_GPL(vmf_insert_pfn_pmd);
 
 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd)
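
Exporting vmf_insert_pfn_pmd() lets modular drivers (DAX-capable block drivers being the obvious users) install a huge PMD mapping straight from their fault path. A hedged sketch of such a caller, assuming the ->pmd_fault vm_operations hook of this kernel series; my_vma_ok() and my_dev_pfn() are made-up stand-ins for driver-specific validation and PFN lookup:

/* Hypothetical driver fault handler; signature follows the
 * ->pmd_fault hook as of this series. */
static int my_driver_pmd_fault(struct vm_area_struct *vma,
			       unsigned long addr, pmd_t *pmd,
			       unsigned int flags)
{
	pfn_t pfn;

	if (!my_vma_ok(vma, addr))
		return VM_FAULT_FALLBACK;	/* fall back to small pages */

	pfn = my_dev_pfn(vma, addr);		/* PMD-aligned device pfn */
	return vmf_insert_pfn_pmd(vma, addr, pmd, pfn,
				  flags & FAULT_FLAG_WRITE);
}
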
@@ -1698,20 +1699,17 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        return 1;
 }
 
-bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
-                 unsigned long old_addr,
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                  unsigned long new_addr, unsigned long old_end,
                  pmd_t *old_pmd, pmd_t *new_pmd)
 {
        spinlock_t *old_ptl, *new_ptl;
        pmd_t pmd;
-
        struct mm_struct *mm = vma->vm_mm;
 
        if ((old_addr & ~HPAGE_PMD_MASK) ||
            (new_addr & ~HPAGE_PMD_MASK) ||
-           old_end - old_addr < HPAGE_PMD_SIZE ||
-           (new_vma->vm_flags & VM_NOHUGEPAGE))
+           old_end - old_addr < HPAGE_PMD_SIZE)
                return false;
 
        /*
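
The dropped new_vma argument existed only for the VM_NOHUGEPAGE test, and since mremap()'s copy_vma() clones the old VMA, flags included, a huge pmd acceptable in the old range is equally acceptable in the new one. The prototype and the mm/mremap.c call site shrink to match (call-site shape approximate, shown only to illustrate the narrower signature):

/* include/linux/huge_mm.h: prototype without the new_vma argument */
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, unsigned long old_end,
			  pmd_t *old_pmd, pmd_t *new_pmd);

/* mm/mremap.c, inside move_page_tables() */
moved = move_huge_pmd(vma, old_addr, new_addr, old_end, old_pmd, new_pmd);
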
@@ -2797,15 +2795,25 @@ static void khugepaged_do_scan(void)
                put_page(hpage);
 }
 
+static bool khugepaged_should_wakeup(void)
+{
+       return kthread_should_stop() ||
+              time_after_eq(jiffies, khugepaged_sleep_expire);
+}
+
 static void khugepaged_wait_work(void)
 {
        if (khugepaged_has_work()) {
-               if (!khugepaged_scan_sleep_millisecs)
+               const unsigned long scan_sleep_jiffies =
+                       msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
+
+               if (!scan_sleep_jiffies)
                        return;
 
+               khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
                wait_event_freezable_timeout(khugepaged_wait,
-                                            kthread_should_stop(),
-                       msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
+                                            khugepaged_should_wakeup(),
+                                            scan_sleep_jiffies);
                return;
        }
 
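
khugepaged_should_wakeup() is what makes the sysfs resets above effective: zeroing khugepaged_sleep_expire makes the predicate true at once, so the wake_up_interruptible() from the store handler ends the wait early instead of merely re-arming it. A runnable userspace analogue of the same pattern, using pthreads (an illustration only, not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static time_t sleep_expire;		/* plays khugepaged_sleep_expire */
static int sleep_secs = 10;		/* plays scan_sleep_millisecs */

static bool should_wakeup(void)
{
	return time(NULL) >= sleep_expire;	/* plays time_after_eq() */
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	sleep_expire = time(NULL) + sleep_secs;	/* arm the deadline */
	while (!should_wakeup()) {
		struct timespec ts = { .tv_sec = sleep_expire };

		pthread_cond_timedwait(&cond, &lock, &ts);
	}
	pthread_mutex_unlock(&lock);
	printf("worker woke early: new interval applies now\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);			/* let the worker arm its deadline */

	pthread_mutex_lock(&lock);	/* plays *_sleep_millisecs_store() */
	sleep_secs = 1;
	sleep_expire = 0;		/* invalidate the armed deadline */
	pthread_cond_signal(&cond);	/* plays wake_up_interruptible() */
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
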
@@ -3029,8 +3037,10 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                return;
 
        /*
-        * Caller holds the mmap_sem write mode, so a huge pmd cannot
-        * materialize from under us.
+        * Caller holds the mmap_sem write mode or the anon_vma lock,
+        * so a huge pmd cannot materialize from under us (khugepaged
+        * holds both the mmap_sem write mode and the anon_vma lock
+        * write mode).
         */
        __split_huge_pmd(vma, pmd, address, freeze);
 }
@@ -3113,7 +3123,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
        VM_BUG_ON_PAGE(page_ref_count(page_tail) != 0, page_tail);
 
        /*
-        * tail_page->_count is zero and not changing from under us. But
+        * tail_page->_refcount is zero and not changing from under us. But
         * get_page_unless_zero() may be running from under us on the
         * tail_page. If we used atomic_set() below instead of atomic_inc(), we
         * would then run atomic_set() concurrently with
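
The renamed field makes the constraint in this comment easier to state: a speculative reference via get_page_unless_zero() can appear at any moment, and it fails only while _refcount is exactly zero. For reference, the page_ref helpers of this series define it roughly as below (paraphrased, tracepoint hooks omitted), which is why the tail's count must be raised with an increment rather than a blind store:

/* include/linux/page_ref.h (paraphrased): a speculative ref succeeds
 * only if _refcount is already non-zero, so publishing the tail page
 * with atomic_inc() cannot overwrite a concurrent speculative ref the
 * way an atomic_set() could. */
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	return atomic_add_unless(&page->_refcount, nr, u);
}

static inline int get_page_unless_zero(struct page *page)
{
	return page_ref_add_unless(page, 1, 0);
}
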
@@ -3340,7 +3350,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        if (mlocked)
                lru_add_drain();
 
-       /* Prevent deferred_split_scan() touching ->_count */
+       /* Prevent deferred_split_scan() touching ->_refcount */
        spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        count = page_count(head);
        mapcount = total_mapcount(head);