mm: replace vma prio_tree with an interval tree
diff --git a/mm/memory.c b/mm/memory.c
index e09c048131869ac3c956aa8d6cfbec9677cdc642..d205e4381a34a8d1aaa2c795f52f3be02c3dbe8a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2801,14 +2801,13 @@ static void unmap_mapping_range_vma(struct vm_area_struct *vma,
        zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
 }
 
-static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
+static inline void unmap_mapping_range_tree(struct rb_root *root,
                                            struct zap_details *details)
 {
        struct vm_area_struct *vma;
-       struct prio_tree_iter iter;
        pgoff_t vba, vea, zba, zea;
 
-       vma_prio_tree_foreach(vma, &iter, root,
+       vma_interval_tree_foreach(vma, root,
                        details->first_index, details->last_index) {
 
                vba = vma->vm_pgoff;
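The new iterator visits exactly the VMAs whose file range overlaps [first_index, last_index], just as the prio_tree walk did, but without the external iterator state (the prio_tree_iter removed above). For reference, vma_interval_tree_foreach, introduced earlier in this series in include/linux/mm.h, expands to a pair of iterator calls along these lines (a sketch; the in-tree definition is authoritative):

    #define vma_interval_tree_foreach(vma, root, start, last)               \
            for (vma = vma_interval_tree_iter_first(root, start, last);    \
                 vma; vma = vma_interval_tree_iter_next(vma, start, last))

The unchanged context below this hunk then clamps the zap window [zba, zea] to each VMA's page range [vba, vea] before calling unmap_mapping_range_vma().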
@@ -2839,7 +2838,7 @@ static inline void unmap_mapping_range_list(struct list_head *head,
         * across *all* the pages in each nonlinear VMA, not just the pages
         * whose virtual address lies outside the file truncation point.
         */
-       list_for_each_entry(vma, head, shared.vm_set.list) {
+       list_for_each_entry(vma, head, shared.nonlinear) {
                details->nonlinear_vma = vma;
                unmap_mapping_range_vma(vma, vma->vm_start, vma->vm_end, details);
        }
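The shared.vm_set.list to shared.nonlinear rename follows from the reworked shared union in struct vm_area_struct: linear file mappings are now threaded through an augmented rbtree node, while VM_NONLINEAR mappings stay on a plain list. Roughly (the authoritative definition is in include/linux/mm_types.h; details may differ):

    union {
            struct {
                    struct rb_node rb;             /* interval tree link */
                    unsigned long rb_subtree_last; /* augmentation: max 'last' in subtree */
            } linear;
            struct list_head nonlinear;            /* VM_NONLINEAR mappings */
    } shared;

Nonlinear VMAs cannot be indexed by file offset, which is why they keep the list and why the comment above insists on unmapping their entire range.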
@@ -2883,7 +2882,7 @@ void unmap_mapping_range(struct address_space *mapping,
 
 
        mutex_lock(&mapping->i_mmap_mutex);
-       if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
+       if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);