mlock: do not hold mmap_sem for extended periods of time
diff --git a/mm/memory.c b/mm/memory.c
index 02e48aa0ed136ff8e4d808d954a20d0b46e6d23d..1bbe9a22429cd442a5981a0c895a5cddd8c7fdfa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1310,6 +1310,28 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                 */
                mark_page_accessed(page);
        }
+       if (flags & FOLL_MLOCK) {
+               /*
+                * The preliminary mapping check is mainly to avoid the
+                * pointless overhead of lock_page on the ZERO_PAGE
+                * which might bounce very badly if there is contention.
+                *
+                * If the page is already locked, we don't need to
+                * handle it now - vmscan will handle it later if and
+                * when it attempts to reclaim the page.
+                */
+               if (page->mapping && trylock_page(page)) {
+                       lru_add_drain();  /* push cached pages to LRU */
+                       /*
+                        * Because we lock page here and migration is
+                        * blocked by the pte's page reference, we need
+                        * only check for file-cache page truncation.
+                        */
+                       if (page->mapping)
+                               mlock_vma_page(page);
+                       unlock_page(page);
+               }
+       }
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
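
A caller reaches this new FOLL_MLOCK branch by passing the flag down through the get_user_pages machinery. As a rough illustration of the interface (this is an editorial sketch, not the mm/mlock.c half of the patch; the helper name is invented, and the trailing NULL is the nonblocking argument that the next hunk adds to __get_user_pages()):

static int mlock_fault_in_range_sketch(struct vm_area_struct *vma,
                                       unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) >> PAGE_SHIFT;
        unsigned int gup_flags = FOLL_TOUCH | FOLL_MLOCK;

        /* Fault writable mappings with a write fault so COW gets broken. */
        if (vma->vm_flags & VM_WRITE)
                gup_flags |= FOLL_WRITE;

        /*
         * pages == NULL: only the side effects matter here (faulting the
         * range in and letting the FOLL_MLOCK branch above call
         * mlock_vma_page() on each present page).  nonblocking == NULL
         * keeps the old "hold mmap_sem for the whole walk" behaviour.
         */
        return __get_user_pages(current, mm, start, nr_pages, gup_flags,
                                NULL, NULL, NULL);
}
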
@@ -1341,7 +1363,8 @@ no_page_table:
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                     unsigned long start, int nr_pages, unsigned int gup_flags,
-                    struct page **pages, struct vm_area_struct **vmas)
+                    struct page **pages, struct vm_area_struct **vmas,
+                    int *nonblocking)
 {
        int i;
        unsigned long vm_flags;
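
The new nonblocking argument changes the locking contract of __get_user_pages(). A documentation-style summary of that contract, as implied by the hunks below (an editorial sketch, not comment text from the patch):

/*
 * @nonblocking: if non-NULL, the caller allows the fault path to drop
 *               mmap_sem instead of blocking with it held: each fault is
 *               issued with FAULT_FLAG_ALLOW_RETRY.  On return,
 *               *nonblocking == 0 means a fault returned VM_FAULT_RETRY
 *               and mmap_sem has already been released; the caller must
 *               not up_read() it again and must re-take it before touching
 *               the mm further.  Passing NULL preserves the old behaviour
 *               of holding mmap_sem across every fault.
 */
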
@@ -1441,10 +1464,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
+                               unsigned int fault_flags = 0;
+
+                               if (foll_flags & FOLL_WRITE)
+                                       fault_flags |= FAULT_FLAG_WRITE;
+                               if (nonblocking)
+                                       fault_flags |= FAULT_FLAG_ALLOW_RETRY;
 
                                ret = handle_mm_fault(mm, vma, start,
-                                       (foll_flags & FOLL_WRITE) ?
-                                       FAULT_FLAG_WRITE : 0);
+                                                       fault_flags);
 
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
@@ -1460,6 +1488,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                else
                                        tsk->min_flt++;
 
+                               if (ret & VM_FAULT_RETRY) {
+                                       *nonblocking = 0;
+                                       return i;
+                               }
+
                                /*
                                 * The VM_FAULT_WRITE bit tells us that
                                 * do_wp_page has broken COW when necessary,
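
Putting the pieces together, a caller that does not want to hold mmap_sem across long page waits follows the protocol sketched below. This illustrates the convention only; the helper name and its FOLL_TOUCH-only flags are invented for the example, and the real user of this interface lives in mm/mlock.c:

static int fault_in_pages_sketch(struct mm_struct *mm, unsigned long start,
                                 int nr_pages)
{
        int done = 0;

        while (done < nr_pages) {
                int locked = 1;
                int ret;

                down_read(&mm->mmap_sem);
                ret = __get_user_pages(current, mm,
                                       start + (unsigned long)done * PAGE_SIZE,
                                       nr_pages - done, FOLL_TOUCH,
                                       NULL, NULL, &locked);
                if (locked)
                        up_read(&mm->mmap_sem);
                /* else: the fault path already dropped mmap_sem for us */

                if (ret < 0)
                        return done ? done : ret;
                if (ret == 0 && locked)
                        break;          /* no progress, no retry: give up */
                done += ret;            /* ret may be 0 after a bare retry */
        }
        return done;
}
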
@@ -1559,7 +1592,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        if (force)
                flags |= FOLL_FORCE;
 
-       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
+                               NULL);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -1584,7 +1618,8 @@ struct page *get_dump_page(unsigned long addr)
        struct page *page;
 
        if (__get_user_pages(current, current->mm, addr, 1,
-                       FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma) < 1)
+                            FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
+                            NULL) < 1)
                return NULL;
        flush_cache_page(vma, addr, page_to_pfn(page));
        return page;
@@ -2112,7 +2147,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct page *old_page, *new_page;
        pte_t entry;
-       int reuse = 0, ret = 0;
+       int ret = 0;
        int page_mkwrite = 0;
        struct page *dirty_page = NULL;
 
@@ -2149,14 +2184,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        page_cache_release(old_page);
                }
-               reuse = reuse_swap_page(old_page);
-               if (reuse)
+               if (reuse_swap_page(old_page)) {
                        /*
                         * The page is all ours.  Move it to our anon_vma so
                         * the rmap code will not search our parent or siblings.
                         * Protected against the rmap code by the page lock.
                         */
                        page_move_anon_rmap(old_page, vma, address);
+                       unlock_page(old_page);
+                       goto reuse;
+               }
                unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
@@ -2220,18 +2257,52 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                dirty_page = old_page;
                get_page(dirty_page);
-               reuse = 1;
-       }
 
-       if (reuse) {
 reuse:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (ptep_set_access_flags(vma, address, page_table, entry,1))
                        update_mmu_cache(vma, address, page_table);
+               pte_unmap_unlock(page_table, ptl);
                ret |= VM_FAULT_WRITE;
-               goto unlock;
+
+               if (!dirty_page)
+                       return ret;
+
+               /*
+                * Yes, Virginia, this is actually required to prevent a race
+                * with clear_page_dirty_for_io() from clearing the page dirty
+                * bit after it clear all dirty ptes, but before a racing
+                * do_wp_page installs a dirty pte.
+                *
+                * do_no_page is protected similarly.
+                */
+               if (!page_mkwrite) {
+                       wait_on_page_locked(dirty_page);
+                       set_page_dirty_balance(dirty_page, page_mkwrite);
+               }
+               put_page(dirty_page);
+               if (page_mkwrite) {
+                       struct address_space *mapping = dirty_page->mapping;
+
+                       set_page_dirty(dirty_page);
+                       unlock_page(dirty_page);
+                       page_cache_release(dirty_page);
+                       if (mapping)    {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+               }
+
+               /* file_update_time outside page_lock */
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+
+               return ret;
        }
 
        /*
@@ -2337,39 +2408,6 @@ gotten:
                page_cache_release(old_page);
 unlock:
        pte_unmap_unlock(page_table, ptl);
-       if (dirty_page) {
-               /*
-                * Yes, Virginia, this is actually required to prevent a race
-                * with clear_page_dirty_for_io() from clearing the page dirty
-                * bit after it clear all dirty ptes, but before a racing
-                * do_wp_page installs a dirty pte.
-                *
-                * do_no_page is protected similarly.
-                */
-               if (!page_mkwrite) {
-                       wait_on_page_locked(dirty_page);
-                       set_page_dirty_balance(dirty_page, page_mkwrite);
-               }
-               put_page(dirty_page);
-               if (page_mkwrite) {
-                       struct address_space *mapping = dirty_page->mapping;
-
-                       set_page_dirty(dirty_page);
-                       unlock_page(dirty_page);
-                       page_cache_release(dirty_page);
-                       if (mapping)    {
-                               /*
-                                * Some device drivers do not set page.mapping
-                                * but still dirty their pages
-                                */
-                               balance_dirty_pages_ratelimited(mapping);
-                       }
-               }
-
-               /* file_update_time outside page_lock */
-               if (vma->vm_file)
-                       file_update_time(vma->vm_file);
-       }
        return ret;
 oom_free_new:
        page_cache_release(new_page);
@@ -3296,7 +3334,12 @@ int make_pages_present(unsigned long addr, unsigned long end)
        vma = find_vma(current->mm, addr);
        if (!vma)
                return -ENOMEM;
-       write = (vma->vm_flags & VM_WRITE) != 0;
+       /*
+        * We want to touch writable mappings with a write fault in order
+        * to break COW, except for shared mappings because these don't COW
+        * and we would not want to dirty them for nothing.
+        */
+       write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
        BUG_ON(addr >= end);
        BUG_ON(end > vma->vm_end);
        len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
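
The new predicate reads as "writable and not shared". A tiny hypothetical helper spells out the truth table (illustration only; make_pages_present() open-codes the expression):

/* Hypothetical helper, equivalent to the expression above. */
static inline int want_write_fault(unsigned long vm_flags)
{
        return (vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
}

/*
 * want_write_fault(VM_READ)                    == 0  read fault
 * want_write_fault(VM_READ|VM_WRITE)           == 1  write fault, breaks COW
 * want_write_fault(VM_READ|VM_WRITE|VM_SHARED) == 0  read fault, avoids
 *                                                    dirtying shared pages
 */
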