index c0a3637..7f85520 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -70,7 +70,7 @@ static void unmap_region(struct mm_struct *mm,
  * MAP_SHARED  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
  *             w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
- *             
+ *
  * MAP_PRIVATE r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
  *             w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
  *             x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
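
The comment above (context for the whitespace cleanup) tabulates which protections mmap actually grants for MAP_SHARED versus MAP_PRIVATE requests. As a userspace illustration of the "w: (copy) copy" entry in the MAP_PRIVATE row, a writable private file mapping takes a copy-on-write fault rather than modifying the file. This is a minimal sketch, not part of the patch; error handling is omitted and "testfile" is a placeholder for any file at least one page long:

/* Writes to a MAP_PRIVATE mapping are copy-on-write; the file is untouched. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);	/* placeholder file */
	char *priv = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE, fd, 0);

	priv[0] = 'X';	/* COW fault: this byte never reaches the file */
	munmap(priv, 4096);
	close(fd);
	return 0;
}
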
@@ -89,6 +89,25 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
+static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
+{
+       return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
+}
+
+/* Update vma->vm_page_prot to reflect vma->vm_flags. */
+void vma_set_page_prot(struct vm_area_struct *vma)
+{
+       unsigned long vm_flags = vma->vm_flags;
+
+       vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
+       if (vma_wants_writenotify(vma)) {
+               vm_flags &= ~VM_SHARED;
+               vma->vm_page_prot = vm_pgprot_modify(vma->vm_page_prot,
+                                                    vm_flags);
+       }
+}
+
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;  /* heuristic overcommit */
 int sysctl_overcommit_ratio __read_mostly = 50;        /* default is 50% */
 unsigned long sysctl_overcommit_kbytes __read_mostly;
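
vma_set_page_prot() recomputes vma->vm_page_prot from vma->vm_flags, and when vma_wants_writenotify() says writes must fault first it drops VM_SHARED so the derived protection is read-only. The helper relies on pgprot_modify() preserving whatever arch-specific bits (cacheability and similar) a driver already set in the old protection. A hedged sketch of that contract, modeled on the x86 definition; the mask name _PAGE_CHG_MASK is x86-specific and other architectures differ:

/*
 * Sketch only: keep the "changeable" bits a driver may have set in the
 * old protection and overlay the access bits derived from vm_flags.
 */
static inline pgprot_t pgprot_modify_sketch(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;

	return __pgprot(preservebits | pgprot_val(newprot));
}
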
@@ -268,7 +287,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len);
 
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
-       unsigned long rlim, retval;
+       unsigned long retval;
        unsigned long newbrk, oldbrk;
        struct mm_struct *mm = current->mm;
        unsigned long min_brk;
@@ -298,9 +317,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
         * segment grow beyond its set limit in the case where the limit is

         * not page aligned -Ram Gupta
         */
-       rlim = rlimit(RLIMIT_DATA);
-       if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
-                       (mm->end_data - mm->start_data) > rlim)
+       if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
+                             mm->end_data, mm->start_data))
                goto out;
 
        newbrk = PAGE_ALIGN(brk);
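
The open-coded RLIMIT_DATA test in brk() is replaced by a helper. Reconstructed from the removed lines, check_data_rlimit() presumably performs the same comparison; the real definition lives in <linux/mm.h>, and the exact return value below is an assumption since only its truthiness matters at this call site:

/* Sketch: non-zero when the new brk would push the data size past the limit. */
static inline int check_data_rlimit_sketch(unsigned long rlim,
					   unsigned long new,
					   unsigned long start,
					   unsigned long end_data,
					   unsigned long start_data)
{
	if (rlim < RLIM_INFINITY &&
	    (new - start) + (end_data - start_data) > rlim)
		return -ENOSPC;	/* caller only checks for non-zero */

	return 0;
}
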
@@ -369,16 +387,18 @@ static int browse_rb(struct rb_root *root)
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
                if (vma->vm_start < prev) {
-                       pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev);
+                       pr_emerg("vm_start %lx < prev %lx\n",
+                                 vma->vm_start, prev);
                        bug = 1;
                }
                if (vma->vm_start < pend) {
-                       pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend);
+                       pr_emerg("vm_start %lx < pend %lx\n",
+                                 vma->vm_start, pend);
                        bug = 1;
                }
                if (vma->vm_start > vma->vm_end) {
-                       pr_emerg("vm_end %lx < vm_start %lx\n",
-                               vma->vm_end, vma->vm_start);
+                       pr_emerg("vm_start %lx > vm_end %lx\n",
+                                 vma->vm_start, vma->vm_end);
                        bug = 1;
                }
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
@@ -409,8 +429,9 @@ static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                struct vm_area_struct *vma;
                vma = rb_entry(nd, struct vm_area_struct, vm_rb);
-               BUG_ON(vma != ignore &&
-                      vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
+               VM_BUG_ON_VMA(vma != ignore &&
+                       vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
+                       vma);
        }
 }
 
@@ -420,8 +441,10 @@ static void validate_mm(struct mm_struct *mm)
        int i = 0;
        unsigned long highest_address = 0;
        struct vm_area_struct *vma = mm->mmap;
+
        while (vma) {
                struct anon_vma_chain *avc;
+
                vma_lock_anon_vma(vma);
                list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
                        anon_vma_interval_tree_verify(avc);
@@ -436,15 +459,16 @@ static void validate_mm(struct mm_struct *mm)
        }
        if (highest_address != mm->highest_vm_end) {
                pr_emerg("mm->highest_vm_end %lx, found %lx\n",
-                      mm->highest_vm_end, highest_address);
+                         mm->highest_vm_end, highest_address);
                bug = 1;
        }
        i = browse_rb(&mm->mm_rb);
        if (i != mm->map_count) {
-               pr_emerg("map_count %d rb %d\n", mm->map_count, i);
+               if (i != -1)
+                       pr_emerg("map_count %d rb %d\n", mm->map_count, i);
                bug = 1;
        }
-       BUG_ON(bug);
+       VM_BUG_ON_MM(bug, mm);
 }
 #else
 #define validate_mm_rb(root, ignore) do { } while (0)
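
The browse_rb()/validate_mm() debug paths now report richer state: plain BUG_ON() becomes VM_BUG_ON_VMA()/VM_BUG_ON_MM(), which dump the offending vma or mm before dying, and the final BUG_ON(bug) follows suit. A rough sketch of the shape of these macros when CONFIG_DEBUG_VM is enabled; the real definitions are in <linux/mmdebug.h>, with dump_vma()/dump_mm() provided by mm/debug.c, and without CONFIG_DEBUG_VM they reduce to compile-time-checked no-ops:

#define VM_BUG_ON_VMA_SKETCH(cond, vma)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_vma(vma);	/* print vm_start/vm_end/flags */ \
			BUG();						\
		}							\
	} while (0)

#define VM_BUG_ON_MM_SKETCH(cond, mm)					\
	do {								\
		if (unlikely(cond)) {					\
			dump_mm(mm);	/* print mm_struct state */	\
			BUG();						\
		}							\
	} while (0)
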
@@ -741,7 +765,7 @@ again:                      remove_next = 1 + (end > next->vm_end);
                         * split_vma inserting another: so it must be
                         * mprotect case 4 shifting the boundary down.
                         */
-                       adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
+                       adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
                        exporter = vma;
                        importer = next;
                }
@@ -787,8 +811,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
        if (!anon_vma && adjust_next)
                anon_vma = next->anon_vma;
        if (anon_vma) {
-               VM_BUG_ON(adjust_next && next->anon_vma &&
-                         anon_vma != next->anon_vma);
+               VM_BUG_ON_VMA(adjust_next && next->anon_vma &&
+                         anon_vma != next->anon_vma, next);
                anon_vma_lock_write(anon_vma);
                anon_vma_interval_tree_pre_update_vma(vma);
                if (adjust_next)
@@ -1010,7 +1034,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 struct vm_area_struct *vma_merge(struct mm_struct *mm,
                        struct vm_area_struct *prev, unsigned long addr,
                        unsigned long end, unsigned long vm_flags,
-                       struct anon_vma *anon_vma, struct file *file,
+                       struct anon_vma *anon_vma, struct file *file,
                        pgoff_t pgoff, struct mempolicy *policy)
 {
        pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
@@ -1036,7 +1060,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
         * Can it merge with the predecessor?
         */
        if (prev && prev->vm_end == addr &&
-                       mpol_equal(vma_policy(prev), policy) &&
+                       mpol_equal(vma_policy(prev), policy) &&
                        can_vma_merge_after(prev, vm_flags,
                                                anon_vma, file, pgoff)) {
                /*
@@ -1064,7 +1088,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
         * Can this new request be merged in front of next?
         */
        if (next && end == next->vm_start &&
-                       mpol_equal(policy, vma_policy(next)) &&
+                       mpol_equal(policy, vma_policy(next)) &&
                        can_vma_merge_before(next, vm_flags,
                                        anon_vma, file, pgoff+pglen)) {
                if (prev && addr < prev->vm_end)        /* case 4 */
@@ -1235,7 +1259,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
                        unsigned long flags, unsigned long pgoff,
                        unsigned long *populate)
 {
-       struct mm_struct * mm = current->mm;
+       struct mm_struct *mm = current->mm;
        vm_flags_t vm_flags;
 
        *populate = 0;
@@ -1263,7 +1287,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 
        /* offset overflow? */
        if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
-               return -EOVERFLOW;
+               return -EOVERFLOW;
 
        /* Too many mappings? */
        if (mm->map_count > sysctl_max_map_count)
@@ -1470,11 +1494,16 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
        if (vma->vm_ops && vma->vm_ops->page_mkwrite)
                return 1;
 
-       /* The open routine did something to the protections already? */
+       /* The open routine did something to the protections that pgprot_modify
+        * won't preserve? */
        if (pgprot_val(vma->vm_page_prot) !=
-           pgprot_val(vm_get_page_prot(vm_flags)))
+           pgprot_val(vm_pgprot_modify(vma->vm_page_prot, vm_flags)))
                return 0;
 
+       /* Do we need to track softdirty? */
+       if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+               return 1;
+
        /* Specialty mapping? */
        if (vm_flags & VM_PFNMAP)
                return 0;
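
With this change a shared, writable VMA asks for write notification whenever CONFIG_MEM_SOFT_DIRTY is enabled and VM_SOFTDIRTY has been cleared, so the first write after clearing faults and re-marks the pages soft-dirty. From userspace the feature is driven through /proc/<pid>/clear_refs and observed in bit 55 of /proc/<pid>/pagemap; a minimal sketch of that side, error handling omitted:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Writing "4" to clear_refs clears the soft-dirty bits for the whole task. */
static void clear_soft_dirty(void)
{
	int fd = open("/proc/self/clear_refs", O_WRONLY);

	write(fd, "4", 1);
	close(fd);
}

/* Bit 55 of the pagemap entry reports the page's soft-dirty state. */
static int page_soft_dirty(void *addr)
{
	uint64_t entry = 0;
	long pagesize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);

	pread(fd, &entry, sizeof(entry),
	      ((uintptr_t)addr / pagesize) * sizeof(entry));
	close(fd);
	return (entry >> 55) & 1;
}
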
@@ -1610,21 +1639,6 @@ munmap_back:
                        goto free_vma;
        }
 
-       if (vma_wants_writenotify(vma)) {
-               pgprot_t pprot = vma->vm_page_prot;
-
-               /* Can vma->vm_page_prot have changed??
-                *
-                * Answer: Yes, drivers may have changed it in their
-                *         f_op->mmap method.
-                *
-                * Ensures that vmas marked as uncached stay that way.
-                */
-               vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
-               if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
-                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       }
-
        vma_link(mm, vma, prev, rb_link, rb_parent);
        /* Once vma denies write, undo our temporary denial count */
        if (file) {
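
The removed block open-coded the "keep uncached mappings uncached" fixup after a driver's ->mmap had run; that responsibility now sits in vma_set_page_prot() via pgprot_modify(). For context, an illustrative (hypothetical) driver ->mmap of the kind the removed comment refers to; remap_pfn_range() and pgprot_noncached() are the real interfaces, while the mydrv_* names and base address are made up:

static phys_addr_t mydrv_base_phys;	/* hypothetical: set at device probe */

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* The driver marks its mapping uncached before inserting the pages. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_base_phys >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
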
@@ -1658,6 +1672,8 @@ out:
         */
        vma->vm_flags |= VM_SOFTDIRTY;
 
+       vma_set_page_prot(vma);
+
        return addr;
 
 unmap_and_free_vma:
@@ -1921,7 +1937,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.align_mask = 0;
        return vm_unmapped_area(&info);
 }
-#endif 
+#endif
 
 /*
  * This mmap-allocator allocates new areas top-down from below the
@@ -2321,13 +2337,13 @@ int expand_stack(struct vm_area_struct *vma, unsigned long address)
 }
 
 struct vm_area_struct *
-find_extend_vma(struct mm_struct * mm, unsigned long addr)
+find_extend_vma(struct mm_struct *mm, unsigned long addr)
 {
-       struct vm_area_struct * vma;
+       struct vm_area_struct *vma;
        unsigned long start;
 
        addr &= PAGE_MASK;
-       vma = find_vma(mm,addr);
+       vma = find_vma(mm, addr);
        if (!vma)
                return NULL;
        if (vma->vm_start <= addr)
@@ -2376,7 +2392,7 @@ static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end)
 {
-       struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
+       struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
        struct mmu_gather tlb;
 
        lru_add_drain();
@@ -2423,7 +2439,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
  * __split_vma() bypasses sysctl_max_map_count checking.  We use this on the
  * munmap path where it doesn't make sense to fail.
  */
-static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
+static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
              unsigned long addr, int new_below)
 {
        struct vm_area_struct *new;
@@ -2512,7 +2528,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
        if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
                return -EINVAL;
 
-       if ((len = PAGE_ALIGN(len)) == 0)
+       len = PAGE_ALIGN(len);
+       if (len == 0)
                return -EINVAL;
 
        /* Find the first overlapping VMA */
@@ -2558,7 +2575,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
                if (error)
                        return error;
        }
-       vma = prev? prev->vm_next: mm->mmap;
+       vma = prev ? prev->vm_next : mm->mmap;
 
        /*
         * unlock any mlock()ed ranges before detaching vmas
@@ -2621,10 +2638,10 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  */
 static unsigned long do_brk(unsigned long addr, unsigned long len)
 {
-       struct mm_struct * mm = current->mm;
-       struct vm_area_struct * vma, * prev;
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma, *prev;
        unsigned long flags;
-       struct rb_node ** rb_link, * rb_parent;
+       struct rb_node **rb_link, *rb_parent;
        pgoff_t pgoff = addr >> PAGE_SHIFT;
        int error;
 
@@ -2848,7 +2865,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                         * safe. It is only safe to keep the vm_pgoff
                         * linear if there are no pages mapped yet.
                         */
-                       VM_BUG_ON(faulted_in_anon_vma);
+                       VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
                        *vmap = vma = new_vma;
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
@@ -3196,7 +3213,7 @@ void __init mmap_init(void)
 {
        int ret;
 
-       ret = percpu_counter_init(&vm_committed_as, 0);
+       ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
        VM_BUG_ON(ret);
 }
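
mmap_init() is updated for the percpu_counter interface change that makes the allocation flag explicit at init time. A minimal usage sketch of the updated API; the demo_* names are invented for illustration:

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/percpu_counter.h>
#include <linux/printk.h>

static struct percpu_counter demo_counter;

static int __init demo_counter_init(void)
{
	int ret = percpu_counter_init(&demo_counter, 0, GFP_KERNEL);

	if (ret)
		return ret;

	percpu_counter_add(&demo_counter, 1);
	pr_info("demo counter: %lld\n", percpu_counter_sum(&demo_counter));
	return 0;
}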