[PATCH] unpaged: VM_NONLINEAR VM_RESERVED
/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_sem (while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_sem; in sys_msync, i_sem nests within
 * down_read of mmap_sem; i_sem and down_write of mmap_sem are never
 * taken together; in truncation, i_sem is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                 in arch-dependent flush_dcache_mmap_lock,
 *                 within inode_lock in __sync_single_inode)
 */
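
/*
 * Illustrative sketch (not part of the original file): for a
 * file-backed page, the ordering above means PG_locked is taken
 * before i_mmap_lock, with the pte lock innermost.  The helper name
 * is hypothetical; the locking calls themselves are the real ones.
 */
#if 0	/* example only, never compiled */
static void example_file_page_walk(struct page *page)
{
        struct address_space *mapping;

        lock_page(page);                        /* PG_locked first */
        mapping = page->mapping;
        if (mapping) {
                /* i_mmap_lock nests inside PG_locked */
                spin_lock(&mapping->i_mmap_lock);
                /* per-vma work here; the pte lock is taken innermost,
                 * as in page_referenced_file() below */
                spin_unlock(&mapping->i_mmap_lock);
        }
        unlock_page(page);
}
#endif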

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

#include <asm/tlbflush.h>

//#define RMAP_DEBUG /* can be enabled only for debugging */

kmem_cache_t *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef RMAP_DEBUG
        struct anon_vma *anon_vma = find_vma->anon_vma;
        struct vm_area_struct *vma;
        unsigned int mapcount = 0;
        int found = 0;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                mapcount++;
                BUG_ON(mapcount > 100000);
                if (vma == find_vma)
                        found = 1;
        }
        BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated, *locked;

                anon_vma = find_mergeable_anon_vma(vma);
                if (anon_vma) {
                        allocated = NULL;
                        locked = anon_vma;
                        spin_lock(&locked->lock);
                } else {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
                                return -ENOMEM;
                        allocated = anon_vma;
                        locked = NULL;
                }

                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
                        list_add(&vma->anon_vma_node, &anon_vma->head);
                        allocated = NULL;
                }
                spin_unlock(&mm->page_table_lock);

                if (locked)
                        spin_unlock(&locked->lock);
                if (unlikely(allocated))
                        anon_vma_free(allocated);
        }
        return 0;
}
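
/*
 * Illustrative use (hypothetical caller, modeled on the anonymous
 * fault path, not part of the original file): anon_vma_prepare()
 * must succeed before the new page is wired into rmap.
 */
#if 0	/* example only, never compiled */
        if (unlikely(anon_vma_prepare(vma)))
                return VM_FAULT_OOM;            /* -ENOMEM from allocation */
        /* ... allocate the page and set up its pte ... */
        page_add_anon_rmap(page, vma, address);
#endif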

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
        BUG_ON(vma->anon_vma != next->anon_vma);
        list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                list_add(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
        }
}

void anon_vma_link(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;

        if (anon_vma) {
                spin_lock(&anon_vma->lock);
                list_add(&vma->anon_vma_node, &anon_vma->head);
                validate_anon_vma(vma);
                spin_unlock(&anon_vma->lock);
        }
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
        struct anon_vma *anon_vma = vma->anon_vma;
        int empty;

        if (!anon_vma)
                return;

        spin_lock(&anon_vma->lock);
        validate_anon_vma(vma);
        list_del(&vma->anon_vma_node);

        /* We must garbage collect the anon_vma if it's empty */
        empty = list_empty(&anon_vma->head);
        spin_unlock(&anon_vma->lock);

        if (empty)
                anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                struct anon_vma *anon_vma = data;

                spin_lock_init(&anon_vma->lock);
                INIT_LIST_HEAD(&anon_vma->head);
        }
}

void __init anon_vma_init(void)
{
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long) page->mapping;
        if (!(anon_mapping & PAGE_MAPPING_ANON))
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
        spin_lock(&anon_vma->lock);
out:
        rcu_read_unlock();
        return anon_vma;
}
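
/*
 * Illustration (not part of the original file): an anon page stores
 * its anon_vma in page->mapping with the low PAGE_MAPPING_ANON bit
 * set, so a mapping pointer decodes as below.  The local variable
 * names here are hypothetical.
 */
#if 0	/* example only, never compiled */
        unsigned long m = (unsigned long) page->mapping;

        if (m & PAGE_MAPPING_ANON)              /* what PageAnon() tests */
                anon_vma = (struct anon_vma *) (m - PAGE_MAPPING_ANON);
        else
                mapping = (struct address_space *) m;
#endif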

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        unsigned long address;

        address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
        if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
                /* page should be within any vma from prio_tree_next */
                BUG_ON(!PageAnon(page));
                return -EFAULT;
        }
        return address;
}
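
/*
 * Worked example (illustration, not part of the original file),
 * assuming 4K pages so PAGE_CACHE_SHIFT == PAGE_SHIFT and
 * pgoff == page->index:
 *
 *   vma->vm_start = 0x40000000, vma->vm_pgoff = 0x10, page->index = 0x13
 *   address = 0x40000000 + ((0x13 - 0x10) << 12) = 0x40003000
 *
 * An index below vm_pgoff, or far enough beyond it, lands outside
 * [vm_start, vm_end) and yields -EFAULT instead.
 */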

/*
 * At what user virtual address is page expected in vma? Checking that the
 * page matches the vma: currently only used by unuse_process, on anon pages.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
        if (PageAnon(page)) {
                if ((void *)vma->anon_vma !=
                    (void *)page->mapping - PAGE_MAPPING_ANON)
                        return -EFAULT;
        } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
        } else
                return -EFAULT;
        return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;

        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!pte_present(*pte)) {
                pte_unmap(pte);
                return NULL;
        }

        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        return NULL;
}
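
/*
 * Illustrative caller pattern (not part of the original file): on
 * success the pte comes back mapped and locked, so the caller must
 * finish with pte_unmap_unlock() using the returned ptl.
 */
#if 0	/* example only, never compiled */
        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                return 0;                       /* page not mapped there */
        /* ... examine or modify *pte while holding ptl ... */
        pte_unmap_unlock(pte, ptl);
#endif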

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
        struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        spinlock_t *ptl;
        int referenced = 0;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        if (ptep_clear_flush_young(vma, address, pte))
                referenced++;

        /* Pretend the page is referenced if the task has the
           swap token and is in the middle of a page fault. */
        if (mm != current->mm && !ignore_token && has_swap_token(mm) &&
                        rwsem_is_locked(&mm->mmap_sem))
                referenced++;

        (*mapcount)--;
        pte_unmap_unlock(pte, ptl);
out:
        return referenced;
}

static int page_referenced_anon(struct page *page, int ignore_token)
{
        unsigned int mapcount;
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int referenced = 0;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return referenced;

        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                referenced += page_referenced_one(page, vma, &mapcount,
                                                  ignore_token);
                if (!mapcount)
                        break;
        }
        spin_unlock(&anon_vma->lock);
        return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag. This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds. It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page, int ignore_token)
{
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int referenced = 0;

        /*
         * The caller's checks on page->mapping and !PageAnon have made
         * sure that this is a file page: the check for page->mapping
         * excludes the case just before it gets set on an anon page.
         */
        BUG_ON(PageAnon(page));

        /*
         * The page lock not only makes sure that page->mapping cannot
         * suddenly be NULLified by truncation, it makes sure that the
         * structure at mapping cannot be freed and reused yet,
         * so we can safely take mapping->i_mmap_lock.
         */
        BUG_ON(!PageLocked(page));

        spin_lock(&mapping->i_mmap_lock);

        /*
         * i_mmap_lock does not stabilize mapcount at all, but mapcount
         * is more likely to be accurate if we note it after spinning.
         */
        mapcount = page_mapcount(page);

        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
                        break;
                }
                referenced += page_referenced_one(page, vma, &mapcount,
                                                  ignore_token);
                if (!mapcount)
                        break;
        }

        spin_unlock(&mapping->i_mmap_lock);
        return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked, int ignore_token)
{
        int referenced = 0;

        if (!swap_token_default_timeout)
                ignore_token = 1;

        if (page_test_and_clear_young(page))
                referenced++;

        if (TestClearPageReferenced(page))
                referenced++;

        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
                        referenced += page_referenced_anon(page, ignore_token);
                else if (is_locked)
                        referenced += page_referenced_file(page, ignore_token);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
                                referenced += page_referenced_file(page,
                                                                ignore_token);
                        unlock_page(page);
                }
        }
        return referenced;
}
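
/*
 * Illustrative caller (hypothetical, loosely modeled on the reclaim
 * path; the exact arguments and label are assumptions here): a
 * non-zero return means the page was recently used and should be kept.
 */
#if 0	/* example only, never compiled */
        if (page_referenced(page, 1, 0))        /* page already locked */
                goto activate_locked;           /* keep it on the active LRU */
#endif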

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
{
        if (atomic_inc_and_test(&page->_mapcount)) {
                struct anon_vma *anon_vma = vma->anon_vma;

                BUG_ON(!anon_vma);
                anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
                page->mapping = (struct address_space *) anon_vma;

                page->index = linear_page_index(vma, address);

                inc_page_state(nr_mapped);
        }
        /* else checking page index and mapping is racy */
}
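
/*
 * Worked example (illustration, not part of the original file):
 * page->_mapcount starts at -1 for an unmapped page, so:
 *
 *   1st mapper: atomic_inc_and_test: -1 -> 0, returns true, and
 *               page->mapping/page->index are (re)set above;
 *   2nd mapper: 0 -> 1, returns false, the fields are left alone;
 *   page_mapcount() == _mapcount + 1 == number of ptes mapping it.
 */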

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
        BUG_ON(PageAnon(page));
        BUG_ON(!pfn_valid(page_to_pfn(page)));

        if (atomic_inc_and_test(&page->_mapcount))
                inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
        if (atomic_add_negative(-1, &page->_mapcount)) {
                BUG_ON(page_mapcount(page) < 0);
                /*
                 * It would be tidy to reset the PageAnon mapping here,
                 * but that might overwrite a racing page_add_anon_rmap
                 * which increments mapcount after us but sets mapping
                 * before us: so leave the reset to free_hot_cold_page,
                 * and remember that it's only reliable while mapped.
                 * Leaving it set also helps swapoff to reinstate ptes
                 * faster for those pages still in swapcache.
                 */
                if (page_test_and_clear_dirty(page))
                        set_page_dirty(page);
                dec_page_state(nr_mapped);
        }
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        int ret = SWAP_AGAIN;

        address = vma_address(page, vma);
        if (address == -EFAULT)
                goto out;

        pte = page_check_address(page, mm, address, &ptl);
        if (!pte)
                goto out;

        /*
         * If the page is mlock()d, we cannot swap it out.
         * If it's recently referenced (perhaps page_referenced
         * skipped over this mm) then we should reactivate it.
         */
        if ((vma->vm_flags & VM_LOCKED) ||
                        ptep_clear_flush_young(vma, address, pte)) {
                ret = SWAP_FAIL;
                goto out_unmap;
        }

        /* Nuke the page table entry. */
        flush_cache_page(vma, address, page_to_pfn(page));
        pteval = ptep_clear_flush(vma, address, pte);

        /* Move the dirty bit to the physical page now the pte is gone. */
        if (pte_dirty(pteval))
                set_page_dirty(page);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        if (PageAnon(page)) {
                swp_entry_t entry = { .val = page_private(page) };
                /*
                 * Store the swap location in the pte.
                 * See handle_pte_fault() ...
                 */
                BUG_ON(!PageSwapCache(page));
                swap_duplicate(entry);
                if (list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        if (list_empty(&mm->mmlist))
                                list_add(&mm->mmlist, &init_mm.mmlist);
                        spin_unlock(&mmlist_lock);
                }
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
                BUG_ON(pte_file(*pte));
                dec_mm_counter(mm, anon_rss);
        } else
                dec_mm_counter(mm, file_rss);

        page_remove_rmap(page);
        page_cache_release(page);

out_unmap:
        pte_unmap_unlock(pte, ptl);
out:
        return ret;
}
/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs. The ->vm_private_data field
 * holds the current cursor into that scan. Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well. Eventually pages
 * will become fully unmapped and become eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster. In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE    min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK    (~(CLUSTER_SIZE - 1))
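
/*
 * Worked example (illustration, not part of the original file),
 * assuming 4K pages and a 4M PMD_SIZE: CLUSTER_SIZE = min(128K, 4M)
 * = 128K, i.e. 32 ptes per scan.  With vm_start itself 128K-aligned,
 * a cursor of 0x21000 gives a start address aligned down by
 * CLUSTER_MASK to vm_start + 0x20000, and the cluster then covers
 * vm_start + 0x20000 .. vm_start + 0x3ffff.
 */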

static void try_to_unmap_cluster(unsigned long cursor,
        unsigned int *mapcount, struct vm_area_struct *vma)
{
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned long address;
        unsigned long end;
        unsigned long pfn;

        address = (vma->vm_start + cursor) & CLUSTER_MASK;
        end = address + CLUSTER_SIZE;
        if (address < vma->vm_start)
                address = vma->vm_start;
        if (end > vma->vm_end)
                end = vma->vm_end;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                return;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return;

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);

        /* Update high watermark before we lower rss */
        update_hiwater_rss(mm);

        for (; address < end; pte++, address += PAGE_SIZE) {
                if (!pte_present(*pte))
                        continue;

                pfn = pte_pfn(*pte);
                if (unlikely(!pfn_valid(pfn))) {
                        print_bad_pte(vma, *pte, address);
                        continue;
                }

                page = pfn_to_page(pfn);
                BUG_ON(PageAnon(page));

                if (ptep_clear_flush_young(vma, address, pte))
                        continue;

                /* Nuke the page table entry. */
                flush_cache_page(vma, address, pfn);
                pteval = ptep_clear_flush(vma, address, pte);

                /* If nonlinear, store the file page offset in the pte. */
                if (page->index != linear_page_index(vma, address))
                        set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

                /* Move the dirty bit to the physical page now the pte is gone. */
                if (pte_dirty(pteval))
                        set_page_dirty(page);

                page_remove_rmap(page);
                page_cache_release(page);
                dec_mm_counter(mm, file_rss);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page)
{
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;

        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;

        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
                ret = try_to_unmap_one(page, vma);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        break;
        }
        spin_unlock(&anon_vma->lock);
        return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page)
{
        struct address_space *mapping = page->mapping;
        pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
        struct vm_area_struct *vma;
        struct prio_tree_iter iter;
        int ret = SWAP_AGAIN;
        unsigned long cursor;
        unsigned long max_nl_cursor = 0;
        unsigned long max_nl_size = 0;
        unsigned int mapcount;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                ret = try_to_unmap_one(page, vma);
                if (ret == SWAP_FAIL || !page_mapped(page))
                        goto out;
        }

        if (list_empty(&mapping->i_mmap_nonlinear))
                goto out;

        list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                if (vma->vm_flags & VM_LOCKED)
                        continue;
                cursor = (unsigned long) vma->vm_private_data;
                if (cursor > max_nl_cursor)
                        max_nl_cursor = cursor;
                cursor = vma->vm_end - vma->vm_start;
                if (cursor > max_nl_size)
                        max_nl_size = cursor;
        }

        if (max_nl_size == 0) { /* any nonlinears locked or reserved */
                ret = SWAP_FAIL;
                goto out;
        }

        /*
         * We don't try to search for this page in the nonlinear vmas,
         * and page_referenced wouldn't have found it anyway. Instead
         * just walk the nonlinear vmas trying to age and unmap some.
         * The mapcount of the page we came in with is irrelevant,
         * but even so use it as a guide to how hard we should try?
         */
        mapcount = page_mapcount(page);
        if (!mapcount)
                goto out;
        cond_resched_lock(&mapping->i_mmap_lock);

        max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
        if (max_nl_cursor == 0)
                max_nl_cursor = CLUSTER_SIZE;

        do {
                list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
                                                shared.vm_set.list) {
                        if (vma->vm_flags & VM_LOCKED)
                                continue;
                        cursor = (unsigned long) vma->vm_private_data;
                        while (cursor < max_nl_cursor &&
                               cursor < vma->vm_end - vma->vm_start) {
                                try_to_unmap_cluster(cursor, &mapcount, vma);
                                cursor += CLUSTER_SIZE;
                                vma->vm_private_data = (void *) cursor;
                                if ((int)mapcount <= 0)
                                        goto out;
                        }
                        vma->vm_private_data = (void *) max_nl_cursor;
                }
                cond_resched_lock(&mapping->i_mmap_lock);
                max_nl_cursor += CLUSTER_SIZE;
        } while (max_nl_cursor <= max_nl_size);

        /*
         * Don't loop forever (perhaps all the remaining pages are
         * in locked vmas).  Reset cursor on all unreserved nonlinear
         * vmas, now forgetting on which ones it had fallen behind.
         */
        list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                vma->vm_private_data = NULL;
out:
        spin_unlock(&mapping->i_mmap_lock);
        return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS - we succeeded in removing all mappings
 * SWAP_AGAIN   - we missed a mapping, try again later
 * SWAP_FAIL    - the page is unswappable
 */
int try_to_unmap(struct page *page)
{
        int ret;

        BUG_ON(!PageLocked(page));

        if (PageAnon(page))
                ret = try_to_unmap_anon(page);
        else
                ret = try_to_unmap_file(page);

        if (!page_mapped(page))
                ret = SWAP_SUCCESS;
        return ret;
}
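/*
 * Illustrative caller (hypothetical, modeled on the pageout path;
 * the goto labels are assumptions): the three return codes drive
 * what reclaim does with the page next.
 */
#if 0	/* example only, never compiled */
        switch (try_to_unmap(page)) {           /* page is locked */
        case SWAP_FAIL:
                goto activate_locked;           /* mlocked or referenced */
        case SWAP_AGAIN:
                goto keep_locked;               /* retry on a later pass */
        case SWAP_SUCCESS:
                break;                          /* fully unmapped: free it */
        }
#endif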