mm: numa: do not trap faults on the huge zero page
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 76824d73380da16f075f0a7ca80933d909d49613..dd599fc235c2fe5ce483128bb89dfad29dc104cd 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -76,6 +76,18 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                if (pte_present(oldpte)) {
                        pte_t ptent;
 
+                       /*
+                        * Avoid trapping faults against the zero or KSM
+                        * pages. See similar comment in change_huge_pmd.
+                        */
+                       if (prot_numa) {
+                               struct page *page;
+
+                               page = vm_normal_page(vma, addr, oldpte);
+                               if (!page || PageKsm(page))
+                                       continue;
+                       }
+
                        ptent = ptep_modify_prot_start(mm, addr, pte);
                        ptent = pte_modify(ptent, newprot);
 
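The pte-level check above relies on vm_normal_page() returning NULL for "special" mappings, which includes the shared zero page: `!page` therefore filters zero-page ptes, and `PageKsm(page)` filters KSM-merged pages. Both are shared across tasks, so NUMA hinting faults on them carry no useful placement information. As a minimal sketch, the filter could be read as the following hypothetical helper; the patch itself open-codes it inside change_pte_range(), and `skip_prot_numa_pte()` is an illustrative name, not a symbol in the tree:

```c
/*
 * Hypothetical helper, equivalent to the open-coded filter added in
 * the hunk above.
 */
static bool skip_prot_numa_pte(struct vm_area_struct *vma,
		unsigned long addr, pte_t oldpte)
{
	struct page *page;

	/*
	 * vm_normal_page() returns NULL for special mappings such as
	 * the zero page. Zero and KSM pages are shared, so trapping
	 * NUMA hinting faults on them tells the balancer nothing.
	 */
	page = vm_normal_page(vma, addr, oldpte);
	return !page || PageKsm(page);
}
```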
@@ -142,7 +154,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
                                split_huge_page_pmd(vma, addr, pmd);
                        else {
                                int nr_ptes = change_huge_pmd(vma, pmd, addr,
-                                               newprot);
+                                               newprot, prot_numa);
 
                                if (nr_ptes) {
                                        if (nr_ptes == HPAGE_PMD_NR) {
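The second hunk threads the new prot_numa flag into change_huge_pmd(), whose body lives in mm/huge_memory.c and is not shown on this page. Judging from the caller shown above, the callee must keep returning 0 when the pmd is not transparent-huge, HPAGE_PMD_NR when it updates the huge pmd, and a nonzero value smaller than HPAGE_PMD_NR when it handles the pmd without changing it, so that the zero-page skip is not counted as an update. The sketch below illustrates that contract; the helper names, locking, and exact structure are assumptions about that kernel vintage, not code copied from this diff:

```c
/*
 * Sketch of the mm/huge_memory.c side, mirroring the pte-level check:
 * under prot_numa, leave the huge zero page's protections untouched.
 * Details are illustrative, not taken from this page.
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pmd_t entry;

		/* Handled, but not necessarily updated. */
		ret = 1;

		/*
		 * The huge zero page is read-only and shared machine-wide;
		 * local vs. remote hits on it are not interesting, so do
		 * not trap NUMA hinting faults against it.
		 */
		if (prot_numa && is_huge_zero_pmd(*pmd)) {
			spin_unlock(ptl);
			return ret;
		}

		entry = pmdp_get_and_clear(mm, addr, pmd);
		entry = pmd_modify(entry, newprot);
		set_pmd_at(mm, addr, pmd, entry);
		ret = HPAGE_PMD_NR;
		spin_unlock(ptl);
	}

	return ret;
}
```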