/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __SPIN_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};
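
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): every swap area shares this one swapper_space, and its radix
 * tree is indexed by the raw swp_entry_t value, which packs the swap
 * area number ("type") together with the page's offset within that
 * area.  The helpers from <linux/swapops.h> do the packing and
 * unpacking:
 */
static void __maybe_unused swap_index_example(void)
{
        swp_entry_t entry = swp_entry(0, 100);  /* area 0, offset 100 */

        BUG_ON(swp_type(entry) != 0);
        BUG_ON(swp_offset(entry) != 100);
        /* entry.val is the index used in swapper_space.page_tree */
}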

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
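
/*
 * Note on the unit conversion above (editorial addition): a count of
 * pages becomes kilobytes by shifting left by (PAGE_SHIFT - 10),
 * because a page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes.
 * With 4K pages (PAGE_SHIFT == 12) that is "pages << 2", i.e. four
 * kilobytes per page.
 */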

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
        int error;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
                page_cache_get(page);
                SetPageSwapCache(page);
                set_page_private(page, entry.val);

                spin_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (likely(!error)) {
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                        INC_CACHE_INFO(add_total);
                }
                spin_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();

                if (unlikely(error)) {
                        /* undo the speculative setup done above */
                        set_page_private(page, 0UL);
                        ClearPageSwapCache(page);
                        page_cache_release(page);
                }
        }
        return error;
}
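
/*
 * Illustrative caller sketch (editorial addition, not part of the
 * original file): the protocol expected around add_to_swap_cache().
 * The caller passes a locked page and a swap entry it holds a
 * reference on; on failure the page is left exactly as it came in,
 * so the caller need only drop its own swap reference.  The helper
 * name is hypothetical.
 */
static int __maybe_unused add_locked_page_example(struct page *page,
                                                  swp_entry_t entry)
{
        int err;

        BUG_ON(!PageLocked(page));
        err = add_to_swap_cache(page, entry, GFP_KERNEL);
        if (err)                        /* -EEXIST or -ENOMEM */
                swap_free(entry);       /* give back our reference */
        return err;
}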

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 * The caller must hold swapper_space.tree_lock.
 */
void __delete_from_swap_cache(struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(!PageSwapCache(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(PagePrivate(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 if the page was added to the swap cache, 0 on failure.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
        swp_entry_t entry;
        int err;

        BUG_ON(!PageLocked(page));
        BUG_ON(!PageUptodate(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty
                 */
                err = add_to_swap_cache(page, entry,
                                gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageDirty(page);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}
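
/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): roughly how a reclaim path uses add_to_swap() on a locked
 * anonymous page.  A return of 0 means no swap slot could be allocated
 * (or the cache insert failed), so the page cannot be paged out this
 * time around.  The helper name is hypothetical.
 */
static int __maybe_unused start_paging_out_example(struct page *page)
{
        if (!add_to_swap(page, GFP_ATOMIC))
                return 0;       /* keep the page in memory for now */
        /*
         * The page is now dirty and in the swap cache; it can be
         * written out via swap_writepage() like any other dirty page.
         */
        return 1;
}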

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list:
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        spin_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        spin_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        /*
         * TestSetPageLocked is a trylock: if someone else holds the
         * page lock, just skip the page rather than wait for it.
         */
        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                remove_exclusive_swap_page(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}
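
/*
 * Editorial note: the PAGEVEC_SIZE batching above hands release_pages()
 * a bounded chunk at a time instead of the whole (possibly very large)
 * array, which bounds the work done per call under the zone LRU lock
 * that release_pages() may take internally.
 */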

/*
 * Lookup a swap entry in the swap cache.  A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                if (!swap_duplicate(entry))
                        break;

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-EEXIST) if there is already a page associated
                 * with this entry in the swap cache: added by a racing
                 * read_swap_cache_async, or add_to_swap or shmem_writepage
                 * re-using the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 *
                 * Masking with GFP_KERNEL keeps only the GFP_KERNEL bits
                 * of gfp_mask: the caller's mask may carry highmem/movable
                 * bits that make no sense for radix-tree node allocation.
                 */
                SetPageLocked(new_page);
                err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_active(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
                ClearPageLocked(new_page);
                swap_free(entry);
        } while (err != -ENOMEM);       /* i.e. retry after -EEXIST */

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code.  We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area.  This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on vma->vm_mm->mmap_sem if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr)
{
        int nr_pages;
        struct page *page;
        unsigned long offset;
        unsigned long end_offset;

        /*
         * Get starting offset for readaround, and number of pages to read.
         * Adjust starting address by readbehind (for NUMA interleave case)?
         * No, it's very unlikely that swap layout would follow vma layout,
         * more likely that neighbouring swap pages came from the same node:
         * so use the same "addr" to choose the same node for each swap read.
         */
        nr_pages = valid_swaphandles(entry, &offset);
        for (end_offset = offset + nr_pages; offset < end_offset; offset++) {
                /* Ok, do the async read-ahead now */
                page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
                                                gfp_mask, vma, addr);
                if (!page)
                        break;
                page_cache_release(page);
        }
        lru_add_drain();        /* Push any new pages onto the LRU now */
        return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
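
/*
 * Illustrative caller sketch (editorial addition, not part of the
 * original file): how a fault handler combines the pieces above, and
 * what the "aligned block" works out to.  With the default page_cluster
 * of 3, valid_swaphandles() describes a window of 1 << 3 == 8 entries
 * rounded down to an 8-entry boundary (ignoring clamping at the swap
 * area's edges), so a fault on offset 21 reads offsets 16..23.  The
 * helper name here is hypothetical; the real fault path lives in
 * mm/memory.c.
 */
static struct page * __maybe_unused fault_swapin_example(swp_entry_t entry,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *page;

        page = lookup_swap_cache(entry);
        if (!page)
                page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
                                        vma, addr);
        return page;    /* NULL: allocation failed or entry was freed */
}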