/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
	.migratepage	= migrate_page,
};
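/*
 * Note there is no ->readpage here: swap-in does not go through the
 * a_ops table at all, it calls swap_readpage() directly.
 */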
static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};
struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};
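/*
 * One address_space per swap type keeps the radix tree locks from all
 * contending on a single swapper_space.  The lookups below go through
 * swap_address_space(entry), which <linux/swap.h> defines as
 * &swapper_spaces[swp_type(entry)].
 */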
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;
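/*
 * These counters are bumped with plain increments - find_total and
 * find_success are not even updated under tree_lock - so the figures
 * printed by show_swap_cache_info() are approximate.
 */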
unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}
void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
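/*
 * The shifts above convert page counts to kilobytes: a page is
 * 1 << PAGE_SHIFT bytes, i.e. 1 << (PAGE_SHIFT - 10) KiB.
 */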
/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(PageSwapCache(page));
	VM_BUG_ON(!PageSwapBacked(page));

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
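/*
 * Same as __add_to_swap_cache() but preallocates the radix tree node:
 * radix_tree_preload() fills a per-cpu reserve of nodes with gfp_mask
 * (disabling preemption on success), so that the insert done under
 * tree_lock never needs to allocate; radix_tree_preload_end() just
 * re-enables preemption.
 */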
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageSwapCache(page));
	VM_BUG_ON(PageWriteback(page));

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}
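/*
 * __delete_from_swap_cache() above runs with the mapping's tree_lock
 * already held; delete_from_swap_cache() further down is the variant
 * that takes the lock and drops the swap cache's page reference itself.
 */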
/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageUptodate(page));

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}
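/*
 * Note the return convention: unlike add_to_swap_cache(), which uses
 * 0/-errno, add_to_swap() above returns 1 on success and 0 on failure,
 * since its caller in the reclaim path (shrink_page_list()) treats the
 * result as a boolean.
 */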
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}
/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}
/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}
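/*
 * The PAGEVEC_SIZE chunking above bounds the work done per
 * release_pages() call, which keeps its internal zone->lru_lock hold
 * times reasonable when tearing down large address spaces.
 */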
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}
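/*
 * Typical use, sketched after the fault path (do_swap_page() in
 * mm/memory.c): consult the cache first, then fall back to readahead,
 * which will allocate the page and start the read.  The gfp mask shown
 * is the one the fault path uses; other callers may differ.
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 *	if (!page)
 *		... entry was freed, or allocation failed ...
 */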
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {	/* seems racy */
			radix_tree_preload_end();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can
		 * safely clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long offset = swp_offset(entry);
	unsigned long start_offset, end_offset;
	unsigned long mask = (1UL << page_cluster) - 1;
	struct blk_plug plug;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)		/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}