Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[deliverable/linux.git] / mm / mlock.c
index 569400a5d07901d39d63ee1d225fe52837e8c386..e6638f565d42669a17989e45450b23c2ac10662a 100644 (file)
@@ -160,7 +160,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 {
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start;
-       int nr_pages = (end - start) / PAGE_SIZE;
+       unsigned long nr_pages = (end - start) / PAGE_SIZE;
        int gup_flags;
 
        VM_BUG_ON(start & ~PAGE_MASK);
@@ -185,6 +185,10 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
                gup_flags |= FOLL_FORCE;
 
+       /*
+        * We made sure addr is within a VMA, so the following will
+        * not result in a stack expansion that recurses back here.
+        */
        return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
                                NULL, NULL, nonblocking);
 }
@@ -340,9 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
 
                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
 
-               newflags = vma->vm_flags | VM_LOCKED;
-               if (!on)
-                       newflags &= ~VM_LOCKED;
+               newflags = vma->vm_flags & ~VM_LOCKED;
+               if (on)
+                       newflags |= VM_LOCKED | VM_POPULATE;
 
                tmp = vma->vm_end;
                if (tmp > end)
@@ -378,7 +382,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        unsigned long end, nstart, nend;
        struct vm_area_struct *vma = NULL;
        int locked = 0;
-       int ret = 0;
+       long ret = 0;
 
        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(len != PAGE_ALIGN(len));
@@ -402,7 +406,8 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
                 * range with the first VMA. Also, skip undesirable VMA types.
                 */
                nend = min(end, vma->vm_end);
-               if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+               if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_POPULATE)) !=
+                   VM_POPULATE)
                        continue;
                if (nstart < vma->vm_start)
                        nstart = vma->vm_start;
@@ -475,18 +480,18 @@ static int do_mlockall(int flags)
        struct vm_area_struct * vma, * prev = NULL;
 
        if (flags & MCL_FUTURE)
-               current->mm->def_flags |= VM_LOCKED;
+               current->mm->def_flags |= VM_LOCKED | VM_POPULATE;
        else
-               current->mm->def_flags &= ~VM_LOCKED;
+               current->mm->def_flags &= ~(VM_LOCKED | VM_POPULATE);
        if (flags == MCL_FUTURE)
                goto out;
 
        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
                vm_flags_t newflags;
 
-               newflags = vma->vm_flags | VM_LOCKED;
-               if (!(flags & MCL_CURRENT))
-                       newflags &= ~VM_LOCKED;
+               newflags = vma->vm_flags & ~VM_LOCKED;
+               if (flags & MCL_CURRENT)
+                       newflags |= VM_LOCKED | VM_POPULATE;
 
                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
This page took 0.028796 seconds and 5 git commands to generate.