/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex (while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */
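
/*
 * Illustrative example (not part of the original ordering list): code
 * that needs both a file's i_mmap_lock and an anon_vma's lock, as
 * vma_adjust() does, must take them in the order given above:
 *
 *      spin_lock(&mapping->i_mmap_lock);
 *      spin_lock(&anon_vma->lock);
 *      ...
 *      spin_unlock(&anon_vma->lock);
 *      spin_unlock(&mapping->i_mmap_lock);
 *
 * Acquiring them in the opposite order on another CPU would risk an
 * AB-BA deadlock.
 */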

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;
        int found = 0;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                mapcount++;
                BUG_ON(mapcount > 100000);
                if (vma == find_vma)
                        found = 1;
        }
        BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated, *locked;

                anon_vma = find_mergeable_anon_vma(vma);
                if (anon_vma) {
                        allocated = NULL;
                        locked = anon_vma;
                        spin_lock(&locked->lock);
                } else {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                                return -ENOMEM;
                        allocated = anon_vma;
                        locked = NULL;
                }

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);

                if (locked)
                        spin_unlock(&locked->lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
        }
        return 0;
}
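
/*
 * Note on the logic above: anon_vma_prepare() is a double-checked
 * allocation.  The unlocked !anon_vma test can race with another
 * thread of the same mm, so vma->anon_vma is re-tested under
 * mm->page_table_lock, and the loser frees its speculative allocation.
 * Typical caller (sketch, assuming the anonymous fault path):
 *
 *      if (unlikely(anon_vma_prepare(vma)))
 *              goto oom;
 *      page = alloc_zeroed_user_highpage(vma, address);
 */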

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
        }
}

void anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                spin_lock(&anon_vma->lock);
                list_add_tail(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
                spin_unlock(&anon_vma->lock);
        }
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int empty;

        if (!anon_vma)
                return;

        spin_lock(&anon_vma->lock);
        validate_anon_vma(vma);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

        if (empty)
                anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
                          unsigned long flags)
{
        if (flags & SLAB_CTOR_CONSTRUCTOR) {
                struct anon_vma *anon_vma = data;

                spin_lock_init(&anon_vma->lock);
                INIT_LIST_HEAD(&anon_vma->head);
        }
}

void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
        return anon_vma;
out:
        rcu_read_unlock();
        return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
        spin_unlock(&anon_vma->lock);
        rcu_read_unlock();
}
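
/*
 * Usage sketch (illustrative; mirrors page_referenced_anon() below):
 * the rcu_read_lock() taken in page_lock_anon_vma() is only dropped by
 * page_unlock_anon_vma(), so the two must always be paired:
 *
 *      anon_vma = page_lock_anon_vma(page);
 *      if (!anon_vma)
 *              return 0;
 *      ... walk anon_vma->head under anon_vma->lock ...
 *      page_unlock_anon_vma(anon_vma);
 *
 * SLAB_DESTROY_BY_RCU on the anon_vma cache keeps the structure's
 * memory (and hence its spinlock) valid for the whole RCU section,
 * even if the anon_vma is freed concurrently.
 */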

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within any vma from prio_tree_next */
                BUG_ON(!PageAnon(page));
                return -EFAULT;
        }
        return address;
}
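
/*
 * Worked example (illustrative numbers, not from the source): with 4K
 * pages, a vma with vm_start = 0x40000000 and vm_pgoff = 16 maps file
 * offset 64K at its first byte.  A page with page->index = 20 is then
 * expected at
 *
 *      0x40000000 + ((20 - 16) << 12) = 0x40004000
 *
 * An address outside [vm_start, vm_end) means this vma does not map
 * the page after all, and -EFAULT is returned.
 */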

/*
 * At what user virtual address is page expected in vma? Checks that the
 * page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        if (PageAnon(page)) {
                if ((void *)vma->anon_vma !=
                    (void *)page->mapping - PAGE_MAPPING_ANON)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
                    vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
        return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;

        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!pte_present(*pte)) {
                pte_unmap(pte);
                return NULL;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}
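
/*
 * Caller pattern (sketch; mirrors page_referenced_one() below).  A
 * non-NULL return hands the caller a mapped pte with its lock held,
 * so the caller must finish with pte_unmap_unlock():
 *
 *      pte = page_check_address(page, mm, address, &ptl);
 *      if (!pte)
 *              goto out;
 *      ... read or modify the pte ...
 *      pte_unmap_unlock(pte, ptl);
 */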

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
        struct vm_area_struct *vma, unsigned int *mapcount)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int referenced = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (ptep_clear_flush_young(vma, address, pte))
                referenced++;

        /* Pretend the page is referenced if the task has the
           swap token and is in the middle of a page fault. */
        if (mm != current->mm && has_swap_token(mm) &&
                        rwsem_is_locked(&mm->mmap_sem))
                referenced++;

        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
out:
        return referenced;
}

static int page_referenced_anon(struct page *page)
{
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int referenced = 0;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return referenced;

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        page_unlock_anon_vma(anon_vma);
        return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int referenced = 0;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_lock.
         */
        BUG_ON(!PageLocked(page));

        spin_lock(&mapping->i_mmap_lock);

        /*
         * i_mmap_lock does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
                        break;
                }
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
        }

        spin_unlock(&mapping->i_mmap_lock);
        return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
        int referenced = 0;

        if (page_test_and_clear_young(page))
                referenced++;

        if (TestClearPageReferenced(page))
                referenced++;

        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page);
                else if (is_locked)
                        referenced += page_referenced_file(page);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
                                referenced += page_referenced_file(page);
                        unlock_page(page);
                }
        }
        return referenced;
}
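
/*
 * Note on the locking above: TestSetPageLocked() is a trylock.  If the
 * page is already locked elsewhere, the page is simply counted as
 * referenced instead of sleeping on the lock, which would stall
 * reclaim; the page->mapping recheck after a successful trylock guards
 * against a racing truncation.
 */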

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int ret = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (pte_dirty(*pte) || pte_write(*pte)) {
                pte_t entry;

                flush_cache_page(vma, address, pte_pfn(*pte));
                entry = ptep_clear_flush(vma, address, pte);
                entry = pte_wrprotect(entry);
                entry = pte_mkclean(entry);
                set_pte_at(mm, address, pte, entry);
                lazy_mmu_prot_update(entry);
                ret = 1;
        }

        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = 0;

        BUG_ON(PageAnon(page));

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if (vma->vm_flags & VM_SHARED)
                        ret += page_mkclean_one(page, vma);
        }
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}

int page_mkclean(struct page *page)
{
        int ret = 0;

        BUG_ON(!PageLocked(page));

        if (page_mapped(page)) {
                struct address_space *mapping = page_mapping(page);
                if (mapping)
                        ret = page_mkclean_file(mapping, page);
                if (page_test_dirty(page)) {
                        page_clear_dirty(page);
                        ret = 1;
                }
        }

        return ret;
}
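
/*
 * Usage sketch (illustrative, modeled on the writeback path): before
 * writing a shared file page back, clear_page_dirty_for_io() uses
 * page_mkclean() to write-protect every pte mapping the page, so the
 * next store through any of them faults and redirties the page:
 *
 *      if (page_mkclean(page))
 *              set_page_dirty(page);
 */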

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        BUG_ON(!anon_vma);
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        page->mapping = (struct address_space *) anon_vma;

        page->index = linear_page_index(vma, address);

        /*
         * nr_mapped state can be updated without turning off
         * interrupts because it is not modified via interrupt.
         */
        __inc_zone_page_state(page, NR_ANON_PAGES);
}
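
/*
 * Worked example (illustrative address): PAGE_MAPPING_ANON is bit 0,
 * which is always clear in a pointer to a word-aligned struct
 * address_space, so it can double as a type tag.  An anon_vma at, say,
 * 0xffff810012345670 is stored in page->mapping as 0xffff810012345671;
 * PageAnon() tests the low bit, and page_lock_anon_vma() recovers the
 * pointer by subtracting PAGE_MAPPING_ANON again.
 */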

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        if (atomic_inc_and_test(&page->_mapcount))
                __page_set_anon_rmap(page, vma, address);
        /* else checking page index and mapping is racy */
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
        __page_set_anon_rmap(page, vma, address);
}
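
/*
 * Note: _mapcount starts at -1 so that a page with exactly one mapping
 * reads 0.  atomic_inc_and_test() fires only on the -1 -> 0 transition,
 * i.e. for the first mapper, which is why a brand-new page (which can
 * have no other mappers yet) may skip the test and atomic_set() the
 * count directly.
 */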

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
        if (atomic_inc_and_test(&page->_mapcount))
                __inc_zone_page_state(page, NR_FILE_MAPPED);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @vma: the vm area from which the mapping is being removed (diagnostics only)
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
        if (atomic_add_negative(-1, &page->_mapcount)) {
                if (unlikely(page_mapcount(page) < 0)) {
                        printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
                        printk(KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
                        printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
                        printk(KERN_EMERG "  page->count = %x\n", page_count(page));
                        printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
                        print_symbol(KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
                        if (vma->vm_ops)
                                print_symbol(KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
                        if (vma->vm_file && vma->vm_file->f_op)
                                print_symbol(KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
                        BUG();
                }

                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
                 * which increments mapcount after us but sets mapping
                 * before us: so leave the reset to free_hot_cold_page,
                 * and remember that it's only reliable while mapped.
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
                if (page_test_dirty(page)) {
                        page_clear_dirty(page);
                        set_page_dirty(page);
                }
                __dec_zone_page_state(page,
                                PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
        }
}
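
/*
 * Note: atomic_add_negative(-1, ...) returns true when the result is
 * negative, i.e. on the 0 -> -1 transition marking the last unmap.
 * Example: a page mapped twice has _mapcount = 1; the first
 * page_remove_rmap() drops it to 0 (still mapped once, nothing to
 * account), the second drops it to -1 and the branch above updates
 * NR_ANON_PAGES or NR_FILE_MAPPED.
 */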

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                int migration)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if (!migration && ((vma->vm_flags & VM_LOCKED) ||
                        (ptep_clear_flush_young(vma, address, pte)))) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };

                if (PageSwapCache(page)) {
                        /*
                         * Store the swap location in the pte.
                         * See handle_pte_fault() ...
                         */
                        swap_duplicate(entry);
                        if (list_empty(&mm->mmlist)) {
                                spin_lock(&mmlist_lock);
                                if (list_empty(&mm->mmlist))
                                        list_add(&mm->mmlist, &init_mm.mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                        dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
                } else {
                        /*
                         * Store the pfn of the page in a special migration
                         * pte. do_swap_page() will wait until the migration
                         * pte is removed and then restart fault handling.
                         */
                        BUG_ON(!migration);
                        entry = make_migration_entry(page, pte_write(pteval));
#endif
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
        } else
#ifdef CONFIG_MIGRATION
        if (migration) {
                /* Establish migration entry for a file page */
                swp_entry_t entry;
                entry = make_migration_entry(page, pte_write(pteval));
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
        } else
#endif
                dec_mm_counter(mm, file_rss);

        page_remove_rmap(page, vma);
        page_cache_release(page);

out_unmap:
        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))
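
/*
 * Worked example (illustrative): with 4K pages, CLUSTER_SIZE is
 * min(32 * 4K, PMD_SIZE) = 128K on x86, so one call to
 * try_to_unmap_cluster() scans at most 32 ptes.  CLUSTER_MASK rounds a
 * cursor down to a cluster boundary: a cursor of 0x21000 (132K) masks
 * to 0x20000 (128K), keeping every scan cluster-aligned.
 */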

static void try_to_unmap_cluster(unsigned long cursor,
                                unsigned int *mapcount, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)
                end = vma->vm_end;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (; address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;
                page = vm_normal_page(vma, address, *pte);
                BUG_ON(!page || PageAnon(page));

                if (ptep_clear_flush_young(vma, address, pte))
                        continue;

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pte_pfn(*pte));
                pteval = ptep_clear_flush(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page, vma);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        break;
        }

        page_unlock_anon_vma(anon_vma);
        return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 * @migration: unmapping for page migration rather than for swap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                ret = try_to_unmap_one(page, vma, migration);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        goto out;
        }

        if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if ((vma->vm_flags & VM_LOCKED) && !migration)
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        }

        if (max_nl_size == 0) {        /* any nonlinears locked or reserved */
                ret = SWAP_FAIL;
                goto out;
        }

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway. Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                        if ((vma->vm_flags & VM_LOCKED) && !migration)
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while (cursor < max_nl_cursor &&
                                cursor < vma->vm_end - vma->vm_start) {
                                try_to_unmap_cluster(cursor, &mapcount, vma);
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                                        goto out;
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
                cond_resched_lock(&mapping->i_mmap_lock);
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas). Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
out:
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @migration: unmapping for page migration rather than for swap
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path. Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN - we missed a mapping, try again later
 * SWAP_FAIL - the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
        int ret;

        BUG_ON(!PageLocked(page));

        if (PageAnon(page))
                ret = try_to_unmap_anon(page, migration);
        else
                ret = try_to_unmap_file(page, migration);

        if (!page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
}
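
/*
 * Usage sketch (illustrative, modeled on the reclaim path in
 * mm/vmscan.c): shrink_page_list() calls try_to_unmap() on a locked
 * page and branches on the tristate result:
 *
 *      switch (try_to_unmap(page, 0)) {
 *      case SWAP_FAIL:
 *              goto activate_locked;
 *      case SWAP_AGAIN:
 *              goto keep_locked;
 *      case SWAP_SUCCESS:
 *              break;
 *      }
 *
 * SWAP_FAIL reactivates the page, SWAP_AGAIN leaves it for a later
 * pass, and only SWAP_SUCCESS lets reclaim go on to write the page out.
 */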