mm: fix TLB flush race between migration and change_protection_range
[deliverable/linux.git] / mm / huge_memory.c
index 7de1bf85f6833422e16161445b71e328fad2e1f6..3d2783e10596ac1fc7124e39444f48add28c9b9c 100644 (file)
@@ -1376,6 +1376,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto clear_pmdnuma;
        }
 
+       /*
+        * The page_table_lock above provides a memory barrier
+        * with change_protection_range.
+        */
+       if (mm_tlb_flush_pending(mm))
+               flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
+
        /*
         * Migrate the THP to the requested node, returns with page unlocked
         * and pmd_numa cleared.
This page took 0.025406 seconds and 5 git commands to generate.