diff --git a/mm/rmap.c b/mm/rmap.c
index 326d5d89e45cdadeb93131c2841208e4c135e052..f5b5c1f3dcd755ae313bba1404f2c9b079d5c18f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -59,6 +59,7 @@
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
 #include <linux/backing-dev.h>
+#include <linux/page_idle.h>
 
 #include <asm/tlbflush.h>
 
@@ -626,16 +627,34 @@ void try_to_unmap_flush(void)
        }
        cpumask_clear(&tlb_ubc->cpumask);
        tlb_ubc->flush_required = false;
+       tlb_ubc->writable = false;
        put_cpu();
 }
 
+/* Flush iff there are potentially writable TLB entries that can race with IO */
+void try_to_unmap_flush_dirty(void)
+{
+       struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+       if (tlb_ubc->writable)
+               try_to_unmap_flush();
+}
+
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-               struct page *page)
+               struct page *page, bool writable)
 {
        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
        cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
        tlb_ubc->flush_required = true;
+
+       /*
+        * If the PTE was dirty then it's best to assume it's writable. The
+        * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+        * before the page is queued for IO.
+        */
+       if (writable)
+               tlb_ubc->writable = true;
 }
 
 /*
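The hunk above only shows the users of current->tlb_ubc, not its definition. For reference, a sketch of the per-task batch it manipulates (the real structure lives in <linux/sched.h> under CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH; the comments and exact layout here are paraphrased, with writable being the field this patch introduces):

	struct tlbflush_unmap_batch {
		/* CPUs that may still cache a TLB entry for an unmapped PFN */
		struct cpumask cpumask;

		/* True if any bit in cpumask is set, i.e. a flush is pending */
		bool flush_required;

		/*
		 * True if at least one batched PTE was dirty when it was
		 * cleared; such entries must be flushed before the page is
		 * handed to IO, which is what try_to_unmap_flush_dirty()
		 * is for.
		 */
		bool writable;
	};
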
@@ -658,7 +677,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 }
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-               struct page *page)
+               struct page *page, bool writable)
 {
 }
 
@@ -868,6 +887,11 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
                pte_unmap_unlock(pte, ptl);
        }
 
+       if (referenced)
+               clear_page_idle(page);
+       if (test_and_clear_page_young(page))
+               referenced++;
+
        if (referenced) {
                pra->referenced++;
                pra->vm_flags |= vma->vm_flags;
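page_referenced_one() now also consults the idle-page-tracking state pulled in by the new <linux/page_idle.h> include: any observed reference clears PG_idle, and a set PG_young counts as an extra reference, since it records an accessed bit that was already harvested out of the PTE (for example by the idle-tracking scan). On 64-bit kernels the two helpers reduce to roughly the following page-flag wrappers (a sketch; 32-bit builds keep these bits in page_ext instead):

	/* any observed reference makes the page non-idle again */
	static inline void clear_page_idle(struct page *page)
	{
		ClearPageIdle(page);
	}

	/*
	 * PG_young stands in for an accessed bit that was cleared from the
	 * PTE earlier, so it must still be reported as a reference here.
	 */
	static inline bool test_and_clear_page_young(struct page *page)
	{
		return TestClearPageYoung(page);
	}
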
@@ -1315,11 +1339,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 */
                pteval = ptep_get_and_clear(mm, address, pte);
 
-               /* Potentially writable TLBs must be flushed before IO */
-               if (pte_dirty(pteval))
-                       flush_tlb_page(vma, address);
-               else
-                       set_tlb_ubc_flush_pending(mm, page);
+               set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
        } else {
                pteval = ptep_clear_flush(vma, address, pte);
        }
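With this last hunk a dirty PTE no longer forces an immediate flush_tlb_page(); it only marks the pending batch as writable, and the comment added in set_tlb_ubc_flush_pending() makes flushing before IO the caller's responsibility. A minimal sketch of a reclaim-side caller honouring that contract (locking and error handling omitted; write_back_unmapped_page(), finish_unmap_batch() and pageout_page() are hypothetical names, only the two flush helpers come from this patch):

	static void write_back_unmapped_page(struct page *page)
	{
		/* no-op unless a dirty/writable PTE was batched by this task */
		try_to_unmap_flush_dirty();
		pageout_page(page);		/* hypothetical stand-in for pageout() */
	}

	static void finish_unmap_batch(void)
	{
		/* flush whatever is still pending, writable or not */
		try_to_unmap_flush();
	}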