/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.sync_page	= block_sync_page,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
	.unplug_io_fn	= swap_unplug_io_fn,
};

struct address_space swapper_space = {
	.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
	.tree_lock	= __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
	.a_ops		= &swap_aops,
	.i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
	.backing_dev_info = &swap_backing_dev_info,
};

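/*
 * Illustrative note (not code used at this point): pages in the swap
 * cache are indexed by the raw swp_entry_t value rather than by file
 * offset, so a lookup against swapper_space is simply
 *
 *	page = find_get_page(&swapper_space, entry.val);
 *
 * as lookup_swap_cache() below does.
 */
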
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

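/*
 * For example, INC_CACHE_INFO(add_total) expands to
 * "do { swap_cache_info.add_total++; } while (0)"; the do/while wrapper
 * lets the macro behave as a single statement after a bare "if".
 */
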
void show_swap_cache_info(void)
{
	printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

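/*
 * Example output (the numbers are illustrative only):
 *
 *	Swap cache: add 34024, delete 32977, find 22378/40527
 *	Free swap  = 1048572kB
 *	Total swap = 2097148kB
 *
 * "find 22378/40527" means 22378 of 40527 lookups hit the swap cache.
 */
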
/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
			       gfp_t gfp_mask)
{
	int error;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageSwapCache(page));
	BUG_ON(PagePrivate(page));
	error = radix_tree_preload(gfp_mask);
	if (!error) {
		write_lock_irq(&swapper_space.tree_lock);
		error = radix_tree_insert(&swapper_space.page_tree,
						entry.val, page);
		if (!error) {
			page_cache_get(page);
			SetPageSwapCache(page);
			set_page_private(page, entry.val);
			total_swapcache_pages++;
			__inc_zone_page_state(page, NR_FILE_PAGES);
			INC_CACHE_INFO(add_total);
		}
		write_unlock_irq(&swapper_space.tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

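/*
 * Return contract (a sketch of caller expectations, inferred from the
 * callers below): 0 on success, -EEXIST if another page already occupies
 * this swap entry in the tree, -ENOMEM if radix-tree node allocation
 * failed. See the switch in add_to_swap() and the retry loop in
 * read_swap_cache_async() for how each case is handled.
 */
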
static int add_to_swap_cache(struct page *page, swp_entry_t entry,
				gfp_t gfp_mask)
{
	int error;

	BUG_ON(PageLocked(page));
	if (!swap_duplicate(entry))
		return -ENOENT;

	SetPageLocked(page);
	error = __add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL);
	/*
	 * Anon pages are already on the LRU, we don't run lru_cache_add here.
	 */
	if (error) {
		ClearPageLocked(page);
		swap_free(entry);
		return error;
	}
	return 0;
}

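/*
 * Note (assumption, based on the masking above): "gfp_mask & GFP_KERNEL"
 * keeps only the reclaim bits (__GFP_WAIT | __GFP_IO | __GFP_FS) of the
 * caller's page-allocation mask, stripping placement bits such as
 * __GFP_HIGHMEM that make no sense for radix-tree node allocation.
 */
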
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(!PageSwapCache(page));
	BUG_ON(PageWriteback(page));
	BUG_ON(PagePrivate(page));

	radix_tree_delete(&swapper_space.page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	total_swapcache_pages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
	swp_entry_t entry;
	int err;

	BUG_ON(!PageLocked(page));

	for (;;) {
		entry = get_swap_page();
		if (!entry.val)
			return 0;

		/*
		 * Radix-tree node allocations from PF_MEMALLOC contexts could
		 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
		 * stops emergency reserves from being allocated.
		 *
		 * TODO: this could cause a theoretical memory reclaim
		 * deadlock in the swap out path.
		 */
		/*
		 * Add it to the swap cache and mark it dirty
		 */
		err = __add_to_swap_cache(page, entry,
				gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

		switch (err) {
		case 0:				/* Success */
			SetPageUptodate(page);
			SetPageDirty(page);
			return 1;
		case -EEXIST:
			/* Raced with "speculative" read_swap_cache_async */
			swap_free(entry);
			continue;
		default:
			/* -ENOMEM radix-tree allocation failure */
			swap_free(entry);
			return 0;
		}
	}
}

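/*
 * Typical caller (a sketch modelled on vmscan's shrink_page_list(), for
 * illustration only): anonymous pages get swap space allocated on their
 * way out of memory, with an atomic mask since we are deep in reclaim:
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page, GFP_ATOMIC))
 *			goto activate_locked;
 */
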
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;

	entry.val = page_private(page);

	write_lock_irq(&swapper_space.tree_lock);
	__delete_from_swap_cache(page);
	write_unlock_irq(&swapper_space.tree_lock);

	swap_free(entry);
	page_cache_release(page);
}

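/*
 * Contrast with __delete_from_swap_cache() above (explanatory note):
 * this locked variant also releases the swap entry's reference via
 * swap_free() and the swap cache's page reference via
 * page_cache_release(), so the page is left with just the caller's
 * own reference.
 */
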
/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
	if (!err) {
		remove_from_page_cache(page);
		page_cache_release(page);	/* pagecache ref */
		if (!swap_duplicate(entry))
			BUG();
		SetPageDirty(page);
	}
	return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
		struct address_space *mapping)
{
	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
	if (!err) {
		delete_from_swap_cache(page);
		/* shift page from clean_pages to dirty_pages list */
		ClearPageDirty(page);
		set_page_dirty(page);
	}
	return err;
}

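/*
 * Illustrative flow (a sketch; see mm/shmem.c for the real callers):
 * shmem_writepage() pushes a tmpfs page out towards swap roughly as
 *
 *	swap = get_swap_page();
 *	if (swap.val && move_to_swap_cache(page, swap) == 0)
 *		... page now lives in swapper_space, marked dirty ...
 *
 * and shmem_getpage()/shmem_unuse() reverse the move with
 * move_from_swap_cache() once the data is wanted in the file again.
 */
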
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * exclusive_swap_page() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !TestSetPageLocked(page)) {
		remove_exclusive_swap_page(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

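/*
 * Typical caller (a sketch; the generic mmu_gather code in
 * include/asm-generic/tlb.h works this way): pages unmapped from a task
 * are batched up and handed over in one call when the TLB is flushed:
 *
 *	free_pages_and_swap_cache(tlb->pages, tlb->nr);
 */
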
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(&swapper_space, entry.val);

	if (page)
		INC_CACHE_INFO(find_success);

	INC_CACHE_INFO(find_total);
	return page;
}

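/*
 * Typical use (a sketch modelled on do_swap_page() in mm/memory.c): the
 * fault handler probes the swap cache first and only falls back to I/O
 * plus readahead on a miss:
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 */
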
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(&swapper_space, entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Associate the page with swap entry in the swap cache.
		 * May fail (-ENOENT) if swap entry has been freed since
		 * our caller observed it.  May fail (-EEXIST) if there
		 * is already a page associated with this entry in the
		 * swap cache: added by a racing read_swap_cache_async,
		 * or by try_to_swap_out (or shmem_writepage) re-using
		 * the just freed swap entry for an existing page.
		 * May fail (-ENOMEM) if radix-tree node allocation failed.
		 */
		err = add_to_swap_cache(new_page, entry, gfp_mask);
		if (!err) {
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_active(new_page);
			swap_readpage(NULL, new_page);
			return new_page;
		}
	} while (err != -ENOENT && err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	int nr_pages;
	struct page *page;
	unsigned long offset;
	unsigned long end_offset;

	/*
	 * Get starting offset for readaround, and number of pages to read.
	 * Adjust starting address by readbehind (for NUMA interleave case)?
	 * No, it's very unlikely that swap layout would follow vma layout,
	 * more likely that neighbouring swap pages came from the same node:
	 * so use the same "addr" to choose the same node for each swap read.
	 */
	nr_pages = valid_swaphandles(entry, &offset);
	for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			break;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
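
/*
 * Worked example (illustrative; assumes 4kB pages): with page_cluster at
 * its usual default of 3, valid_swaphandles() returns a window of up to
 * 1 << 3 = 8 swap slots aligned around the faulting entry, so a single
 * minor fault can queue up to 8 * 4kB = 32kB of contiguous swap reads in
 * the loop above, before the final read of the target entry itself that
 * the caller will wait on.
 */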