diff --git a/mm/mlock.c b/mm/mlock.c
index 14645be..145a425 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -516,6 +516,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        int nr_pages;
        int ret = 0;
        int lock = !!(newflags & VM_LOCKED);
+       vm_flags_t old_flags = vma->vm_flags;
 
        if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
            is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
@@ -550,6 +551,8 @@ success:
        nr_pages = (end - start) >> PAGE_SHIFT;
        if (!lock)
                nr_pages = -nr_pages;
+       else if (old_flags & VM_LOCKED)
+               nr_pages = 0;
        mm->locked_vm += nr_pages;
 
        /*
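The two hunks above make mlock_fixup() treat a vma that already has VM_LOCKED set as contributing zero new pages, so converting an MLOCK_ONFAULT lock into a regular lock no longer double-counts the range in mm->locked_vm. A minimal userspace sketch of the observable effect, assuming a kernel with this patch and glibc 2.27+ for mlock2(); error handling is omitted and the quoted VmLck values are illustrative, not part of the patch:

#define _GNU_SOURCE             /* mlock2() and MLOCK_ONFAULT need glibc 2.27+ */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void print_vmlck(const char *tag)
{
        char line[128];
        FILE *f = fopen("/proc/self/status", "r");

        while (f && fgets(line, sizeof(line), f))
                if (!strncmp(line, "VmLck:", 6))
                        printf("%s%s", tag, line);
        if (f)
                fclose(f);
}

int main(void)
{
        size_t len = 16 * 4096;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        mlock2(buf, len, MLOCK_ONFAULT);  /* deferred lock, counted once  */
        print_vmlck("after mlock2: ");    /* e.g. "VmLck:  64 kB"         */
        mlock(buf, len);                  /* re-lock the same range       */
        print_vmlck("after mlock:  ");    /* fixed: still 64 kB, not 128  */
        return 0;
}

Without the old_flags check, the second call would bump VmLck to 128 kB even though not a single new page was locked.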
@@ -617,6 +620,45 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
        return error;
 }
 
+/*
+ * Walk the VMAs that intersect [start, start + len) and sum up the
+ * sizes of the regions that are already mlocked.  Pages locked with
+ * the deferred semantics of mlock2(..., MLOCK_ONFAULT) are counted
+ * as well.
+ * Return value: number of previously mlocked pages.
+ */
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
+               unsigned long start, size_t len)
+{
+       struct vm_area_struct *vma;
+       unsigned long count = 0;
+
+       if (mm == NULL)
+               mm = current->mm;
+
+       vma = find_vma(mm, start);
+       if (vma == NULL)
+               vma = mm->mmap;
+
+       for (; vma; vma = vma->vm_next) {
+               if (start >= vma->vm_end)
+                       continue;
+               if (start + len <= vma->vm_start)
+                       break;
+               if (vma->vm_flags & VM_LOCKED) {
+                       if (start > vma->vm_start)
+                               count -= (start - vma->vm_start);
+                       if (start + len < vma->vm_end) {
+                               count += start + len - vma->vm_start;
+                               break;
+                       }
+                       count += vma->vm_end - vma->vm_start;
+               }
+       }
+
+       return count >> PAGE_SHIFT;
+}
+
 static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t flags)
 {
        unsigned long locked;
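The interval arithmetic above is subtle: the walk subtracts the head of a locked vma that lies below start, adds only up to start + len when the range ends inside a vma, and otherwise accumulates whole vma spans. A self-contained sketch of the same arithmetic on a synthetic, sorted list; struct fake_vma, count_locked_pages() and the 4 KiB PAGE_SHIFT are hypothetical stand-ins, not kernel code:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumes 4 KiB pages */

struct fake_vma {
        unsigned long vm_start, vm_end;
        int locked;                     /* stands in for VM_LOCKED */
};

static unsigned long count_locked_pages(const struct fake_vma *vmas, int n,
                                        unsigned long start, unsigned long len)
{
        unsigned long count = 0;
        int i;

        for (i = 0; i < n; i++) {
                const struct fake_vma *vma = &vmas[i];

                if (start >= vma->vm_end)
                        continue;       /* vma lies entirely below the range */
                if (start + len <= vma->vm_start)
                        break;          /* vma lies entirely above the range */
                if (!vma->locked)
                        continue;
                if (start > vma->vm_start)
                        /* trim the unrequested head; repaid just below */
                        count -= start - vma->vm_start;
                if (start + len < vma->vm_end) {
                        count += start + len - vma->vm_start;
                        break;          /* range ends inside this vma */
                }
                count += vma->vm_end - vma->vm_start;
        }
        return count >> PAGE_SHIFT;
}

int main(void)
{
        /* Two locked regions: [0x1000,0x5000) and [0x6000,0x9000). */
        const struct fake_vma vmas[] = {
                { 0x1000, 0x5000, 1 },
                { 0x6000, 0x9000, 1 },
        };

        /* [0x2000,0x8000) overlaps 3 + 2 = 5 locked pages. */
        printf("%lu\n", count_locked_pages(vmas, 2, 0x2000, 0x6000));
        return 0;
}

The transient deficit from the head trim is safe even with an unsigned counter: the later additions always cover at least the subtracted span before the result is shifted to pages.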
@@ -639,6 +681,16 @@ static __must_check int do_mlock(unsigned long start, size_t len, vm_flags_t fla
                return -EINTR;
 
        locked += current->mm->locked_vm;
+       if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
+               /*
+                * The requested region may intersect areas that are
+                * already mlocked; that overlap is already accounted for
+                * in "mm->locked_vm" and must not be counted again toward
+                * the new mlock increment, so subtract it here.
+                */
+               locked -= count_mm_mlocked_page_nr(current->mm,
+                               start, len);
+       }
 
        /* check against resource limits */
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))