mm: consolidate code to setup pte
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Thu, 3 Apr 2014 21:48:16 +0000 (14:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Apr 2014 23:21:04 +0000 (16:21 -0700)
Extract and consolidate the code that sets up a pte after a fault from
do_read_fault(), do_cow_fault() and do_shared_fault() into a new helper,
do_set_pte().
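
The new do_set_pte() helper takes two flags: "write" selects the
dirty/writable pte setup via maybe_mkwrite(pte_mkdirty()), and "anon"
selects anonymous rmap and counter accounting instead of the file-backed
variants. The three call sites in the diff below map onto it as follows:

        /* read fault on a file page: not writable, not anonymous */
        do_set_pte(vma, address, fault_page, pte, false, false);  /* do_read_fault() */

        /* COW fault: install the private copy as a new anonymous page */
        do_set_pte(vma, address, new_page, pte, true, true);      /* do_cow_fault() */

        /* write fault on a shared mapping: writable, still file-backed */
        do_set_pte(vma, address, fault_page, pte, true, false);   /* do_shared_fault() */

Note that pte_mksoft_dirty() returns the updated pte and its result is
discarded in the helper; that quirk is inherited unchanged from the old
do_read_fault() code being removed here.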

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memory.c

index ac3990d77ec94b01915dad672168b4a29f1160bc..90cea22001ef32e0ba19af3c02d70e1a2f3f1604 100644
@@ -3318,13 +3318,37 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
        return ret;
 }
 
+static void do_set_pte(struct vm_area_struct *vma, unsigned long address,
+               struct page *page, pte_t *pte, bool write, bool anon)
+{
+       pte_t entry;
+
+       flush_icache_page(vma, page);
+       entry = mk_pte(page, vma->vm_page_prot);
+       if (write)
+               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+       else if (pte_file(*pte) && pte_file_soft_dirty(*pte))
+               pte_mksoft_dirty(entry);
+       if (anon) {
+               inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
+               page_add_new_anon_rmap(page, vma, address);
+       } else {
+               inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+               page_add_file_rmap(page);
+       }
+       set_pte_at(vma->vm_mm, address, pte, entry);
+
+       /* no need to invalidate: a not-present page won't be cached */
+       update_mmu_cache(vma, address, pte);
+}
+
 static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd,
                pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
 {
        struct page *fault_page;
        spinlock_t *ptl;
-       pte_t entry, *pte;
+       pte_t *pte;
        int ret;
 
        ret = __do_fault(vma, address, pgoff, flags, &fault_page);
@@ -3338,20 +3362,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                page_cache_release(fault_page);
                return ret;
        }
-
-       flush_icache_page(vma, fault_page);
-       entry = mk_pte(fault_page, vma->vm_page_prot);
-       if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
-               pte_mksoft_dirty(entry);
-       inc_mm_counter_fast(mm, MM_FILEPAGES);
-       page_add_file_rmap(fault_page);
-       set_pte_at(mm, address, pte, entry);
-
-       /* no need to invalidate: a not-present page won't be cached */
-       update_mmu_cache(vma, address, pte);
+       do_set_pte(vma, address, fault_page, pte, false, false);
        pte_unmap_unlock(pte, ptl);
        unlock_page(fault_page);
-
        return ret;
 }
 
@@ -3361,7 +3374,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct page *fault_page, *new_page;
        spinlock_t *ptl;
-       pte_t entry, *pte;
+       pte_t *pte;
        int ret;
 
        if (unlikely(anon_vma_prepare(vma)))
@@ -3390,17 +3403,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                page_cache_release(fault_page);
                goto uncharge_out;
        }
-
-       flush_icache_page(vma, new_page);
-       entry = mk_pte(new_page, vma->vm_page_prot);
-       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-       inc_mm_counter_fast(mm, MM_ANONPAGES);
-       page_add_new_anon_rmap(new_page, vma, address);
-       set_pte_at(mm, address, pte, entry);
-
-       /* no need to invalidate: a not-present page won't be cached */
-       update_mmu_cache(vma, address, pte);
-
+       do_set_pte(vma, address, new_page, pte, true, true);
        pte_unmap_unlock(pte, ptl);
        unlock_page(fault_page);
        page_cache_release(fault_page);
@@ -3418,7 +3421,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *fault_page;
        struct address_space *mapping;
        spinlock_t *ptl;
-       pte_t entry, *pte;
+       pte_t *pte;
        int dirtied = 0;
        int ret, tmp;
 
@@ -3447,16 +3450,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                page_cache_release(fault_page);
                return ret;
        }
-
-       flush_icache_page(vma, fault_page);
-       entry = mk_pte(fault_page, vma->vm_page_prot);
-       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-       inc_mm_counter_fast(mm, MM_FILEPAGES);
-       page_add_file_rmap(fault_page);
-       set_pte_at(mm, address, pte, entry);
-
-       /* no need to invalidate: a not-present page won't be cached */
-       update_mmu_cache(vma, address, pte);
+       do_set_pte(vma, address, fault_page, pte, true, false);
        pte_unmap_unlock(pte, ptl);
 
        if (set_page_dirty(fault_page))
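
For context, a condensed sketch of how a caller drives the new helper
under the page-table lock (based on the do_read_fault() path above;
the fault itself and error handling abbreviated):

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (unlikely(!pte_same(*pte, orig_pte))) {
                /* pte changed under us: back out and let the fault retry */
                pte_unmap_unlock(pte, ptl);
                page_cache_release(fault_page);
                return ret;
        }
        do_set_pte(vma, address, fault_page, pte, false, false);
        pte_unmap_unlock(pte, ptl);
        unlock_page(fault_page);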