diff --git a/mm/memory.c b/mm/memory.c
index 61a262b..1ce2e2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -82,7 +82,6 @@ EXPORT_SYMBOL(max_mapnr);
 EXPORT_SYMBOL(mem_map);
 #endif
 
-unsigned long num_physpages;
 /*
  * A number of key systems in x86 including ioremap() rely on the assumption
  * that high_memory defines the upper bound on direct map memory, then end
@@ -92,7 +91,6 @@ unsigned long num_physpages;
  */
 void * high_memory;
 
-EXPORT_SYMBOL(num_physpages);
 EXPORT_SYMBOL(high_memory);
 
 /*
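
These first two hunks drop the stale num_physpages counter along with its export; on memory-hotplug and sparse-memory configurations the global had long been an unreliable count. For callers that still want the number of physical pages, mainline grew a helper that derives it from per-node state instead. A sketch of that replacement, matching the mainline get_num_physpages() definition as I recall it:

static inline unsigned long get_num_physpages(void)
{
	int nid;
	unsigned long phys_pages = 0;

	/* Sum the pages actually present in every online NUMA node. */
	for_each_online_node(nid)
		phys_pages += node_present_pages(nid);

	return phys_pages;
}
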
@@ -1101,6 +1099,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
        spinlock_t *ptl;
        pte_t *start_pte;
        pte_t *pte;
+       unsigned long range_start = addr;
 
 again:
        init_rss_vec(rss);
@@ -1151,7 +1150,7 @@ again:
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
                                if (pte_young(ptent) &&
-                                   likely(!VM_SequentialReadHint(vma)))
+                                   likely(!(vma->vm_flags & VM_SEQ_READ)))
                                        mark_page_accessed(page);
                                rss[MM_FILEPAGES]--;
                        }
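
This hunk open-codes a flag test that used to hide behind the VM_SequentialReadHint() wrapper, which was removed from <linux/mm.h> around the same time. The behaviour is unchanged: pages of a mapping hinted with madvise(MADV_SEQUENTIAL) skip mark_page_accessed() at zap time, so they age off the LRU sooner. As I recall it, the removed wrapper was simply:

/* Former <linux/mm.h> wrapper (since removed); the open-coded test
 * above reads the same VM_SEQ_READ flag directly. */
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
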
@@ -1206,12 +1205,14 @@ again:
                force_flush = 0;
 
 #ifdef HAVE_GENERIC_MMU_GATHER
-               tlb->start = addr;
-               tlb->end = end;
+               tlb->start = range_start;
+               tlb->end = addr;
 #endif
                tlb_flush_mmu(tlb);
-               if (addr != end)
+               if (addr != end) {
+                       range_start = addr;
                        goto again;
+               }
        }
 
        return addr;
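
Together with the range_start declaration added in the earlier hunk, this narrows the TLB flush in the force_flush path: rather than flushing the caller's whole [addr, end) range, each pass flushes only [range_start, addr), the span it actually zapped, and rewinds range_start before retrying. A userspace toy model of that bookkeeping (toy_tlb, toy_flush and the batching heuristic are illustrative stand-ins, not kernel API):

#include <stdio.h>

struct toy_tlb { unsigned long start, end; };

static void toy_flush(struct toy_tlb *tlb)
{
	printf("flush [%#lx, %#lx)\n", tlb->start, tlb->end);
}

static void zap_range(struct toy_tlb *tlb, unsigned long addr, unsigned long end)
{
	unsigned long range_start = addr;
again:
	while (addr != end) {
		addr += 0x1000;			/* "zap" one 4 KiB page */
		if ((addr & 0xffff) == 0)	/* pretend the gather batch filled up */
			break;
	}
	tlb->start = range_start;		/* flush only what this pass covered */
	tlb->end = addr;
	toy_flush(tlb);
	if (addr != end) {
		range_start = addr;		/* the next pass restarts here */
		goto again;
	}
}

int main(void)
{
	struct toy_tlb tlb = { 0, 0 };

	zap_range(&tlb, 0x10000, 0x50000);
	return 0;
}
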
@@ -2904,7 +2905,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
                        details->first_index, details->last_index) {
 
                vba = vma->vm_pgoff;
-               vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+               vea = vba + vma_pages(vma) - 1;
                /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
                zba = details->first_index;
                if (zba < vba)
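
This one is a pure cleanup: vma_pages() is the existing <linux/mm.h> helper for exactly this expression, so the open-coded shift is replaced with the named form. The helper's mainline definition:

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
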
@@ -4201,7 +4202,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
        up_read(&mm->mmap_sem);
 }
 
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 void might_fault(void)
 {
        /*
@@ -4213,13 +4214,17 @@ void might_fault(void)
        if (segment_eq(get_fs(), KERNEL_DS))
                return;
 
-       might_sleep();
        /*
         * it would be nicer only to annotate paths which are not under
         * pagefault_disable, however that requires a larger audit and
         * providing helpers like get_user_atomic.
         */
-       if (!in_atomic() && current->mm)
+       if (in_atomic())
+               return;
+
+       __might_sleep(__FILE__, __LINE__, 0);
+
+       if (current->mm)
                might_lock_read(&current->mm->mmap_sem);
 }
 EXPORT_SYMBOL(might_fault);
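
The last two hunks reorder might_fault() so that the atomic-context check precedes the sleep annotation, and widen the #ifdef so the function is also built when only CONFIG_DEBUG_ATOMIC_SLEEP is set (it now calls __might_sleep() directly rather than might_sleep()). Assembled from the hunks above, with the unchanged comment blocks elided, the function after the patch reads:

void might_fault(void)
{
	/* KERNEL_DS accesses never fault, so there is nothing to annotate. */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	/*
	 * Bail out before the sleep annotation: under pagefault_disable()
	 * the fault path will not sleep, so warning there would be wrong.
	 */
	if (in_atomic())
		return;

	__might_sleep(__FILE__, __LINE__, 0);

	if (current->mm)
		might_lock_read(&current->mm->mmap_sem);
}
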