Merge tag 'arc-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
diff --git a/mm/memory.c b/mm/memory.c
index 4b5a282e110739d957cd810c02179d63970ce911..6efe36a998bae484789441e3ea47e3b38b2ea346 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,10 +235,8 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
 
 static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
-       if (!tlb->end)
-               return;
-
        tlb_flush(tlb);
+       mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
 #endif
@@ -258,6 +256,9 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
 
 void tlb_flush_mmu(struct mmu_gather *tlb)
 {
+       if (!tlb->end)
+               return;
+
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
 }
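
The two hunks above hoist the !tlb->end bail-out from tlb_flush_mmu_tlbonly() into tlb_flush_mmu(), so a gather that never batched a page skips both the hardware TLB flush and the newly added mmu_notifier_invalidate_range() call, which tells secondary MMUs (IOMMU/KVM-style users) that the mappings in [tlb->start, tlb->end) are gone. The test works because the generic mmu_gather code only widens the range when something is actually batched; a rough sketch of that bookkeeping, based on the include/asm-generic/tlb.h helpers of this era (exact names and fields are an assumption, and architectures may override them):

/*
 * Sketch only: how the generic mmu_gather range tracking makes the
 * "!tlb->end" test mean "nothing was unmapped, nothing to flush".
 */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address)
{
	/* Widen the pending flush range to cover this page. */
	tlb->start = min(tlb->start, address);
	tlb->end   = max(tlb->end, address + PAGE_SIZE);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	/* Encode "empty range": end stays 0 until a page is batched. */
	tlb->start = TASK_SIZE;
	tlb->end   = 0;
}
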
@@ -1326,9 +1327,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
                         * safe to do nothing in this case.
                         */
                        if (vma->vm_file) {
-                               mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                               i_mmap_lock_write(vma->vm_file->f_mapping);
                                __unmap_hugepage_range_final(tlb, vma, start, end, NULL);
-                               mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+                               i_mmap_unlock_write(vma->vm_file->f_mapping);
                        }
                } else
                        unmap_page_range(tlb, vma, start, end, details);
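
Here the i_mmap_mutex is gone: the mapping's interval tree is now protected by an rw_semaphore, and the open-coded mutex_lock()/mutex_unlock() pair becomes the i_mmap_lock_write()/i_mmap_unlock_write() helpers. A minimal sketch of what those helpers amount to, assuming the include/linux/fs.h definitions from the same i_mmap_rwsem conversion:

/* Sketch only: write-side i_mmap helpers, assuming the field is the
 * rw_semaphore i_mmap_rwsem introduced by the v3.19 conversion. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	down_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	up_write(&mapping->i_mmap_rwsem);
}

Taking the write side keeps __unmap_hugepage_range_final() exclusive against anyone walking the tree, so the locking semantics match the old mutex.
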
@@ -2220,7 +2221,7 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush(vma, address, page_table);
+               ptep_clear_flush_notify(vma, address, page_table);
                page_add_new_anon_rmap(new_page, vma, address);
                mem_cgroup_commit_charge(new_page, memcg, false);
                lru_cache_add_active_or_unevictable(new_page, vma);
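
The COW commit path switches to ptep_clear_flush_notify(), which pairs the PTE clear and TLB flush with a secondary-MMU invalidation for that one page, so a device holding the old translation drops it before the new anon page becomes visible. As a sketch of what the helper expands to, modeled on the include/linux/mmu_notifier.h macro from the same series (treat the exact expansion as an assumption):

/* Sketch of ptep_clear_flush_notify(), modeled on the
 * include/linux/mmu_notifier.h macro of this series. */
#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = (__address) & PAGE_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, ___addr, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
	___pte;								\
})
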
@@ -2377,12 +2378,12 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
 
 
-       mutex_lock(&mapping->i_mmap_mutex);
+       i_mmap_lock_read(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-       mutex_unlock(&mapping->i_mmap_mutex);
+       i_mmap_unlock_read(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
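
unmap_mapping_range() only walks the i_mmap interval tree and the nonlinear list, so with a semaphore in place it can take the read side and let concurrent truncate/hole-punch callers on the same file proceed in parallel; exclusion against anyone modifying the tree still comes from the write side. The read-side helpers mirror the write-side sketch above (same assumption about i_mmap_rwsem):

/* Read-side counterparts, same assumptions as the write-side sketch. */
static inline void i_mmap_lock_read(struct address_space *mapping)
{
	down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
	up_read(&mapping->i_mmap_rwsem);
}
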
 
@@ -3365,6 +3366,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(handle_mm_fault);
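
The new GPL-only export lets modules drive page faults on a target mm directly instead of going through get_user_pages(). A hypothetical caller could look like the sketch below; demo_fault_in() is invented for illustration and assumes the v3.19 prototype int handle_mm_fault(struct mm_struct *, struct vm_area_struct *, unsigned long, unsigned int) and the then-current mm->mmap_sem naming:

/*
 * Hypothetical module-side user of the newly exported symbol,
 * not kernel code: fault one page writably into "mm" at "addr".
 */
static int demo_fault_in(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	int fault;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);
	if (!vma || addr < vma->vm_start) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	/* Returns VM_FAULT_* bits; check for the error mask. */
	fault = handle_mm_fault(mm, vma, addr, FAULT_FLAG_WRITE);
	up_read(&mm->mmap_sem);

	return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;
}
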
 
 #ifndef __PAGETABLE_PUD_FOLDED
 /*