/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>
/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list, to make sync_page look nicer, and to allow
 * future use of radix_tree tags in the swap cache.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .sync_page      = block_sync_page,
        .set_page_dirty = __set_page_dirty_nobuffers,
        .migratepage    = migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
        .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
};

struct address_space swapper_space = {
        .page_tree      = RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
        .tree_lock      = __RW_LOCK_UNLOCKED(swapper_space.tree_lock),
        .a_ops          = &swap_aops,
        .i_mmap_nonlinear = LIST_HEAD_INIT(swapper_space.i_mmap_nonlinear),
        .backing_dev_info = &swap_backing_dev_info,
};

#define INC_CACHE_INFO(x)       do { swap_cache_info.x++; } while (0)

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
        unsigned long noent_race;
        unsigned long exist_race;
} swap_cache_info;

void show_swap_cache_info(void)
{
        printk("Swap cache: add %lu, delete %lu, find %lu/%lu, race %lu+%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total,
                swap_cache_info.noent_race, swap_cache_info.exist_race);
        printk("Free swap  = %lukB\n", nr_swap_pages << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}
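
/*
 * Note on the kB conversion above: nr_swap_pages counts pages, and a
 * page is (1 << PAGE_SHIFT) bytes, so shifting by (PAGE_SHIFT - 10)
 * converts a page count to kilobytes.  For example, with 4kB pages
 * (PAGE_SHIFT == 12), 100 free swap pages print as 100 << 2 == 400kB.
 */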

/*
 * __add_to_swap_cache resembles add_to_page_cache on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
static int __add_to_swap_cache(struct page *page, swp_entry_t entry,
                               gfp_t gfp_mask)
{
        int error;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageSwapCache(page));
        BUG_ON(PagePrivate(page));
        error = radix_tree_preload(gfp_mask);
        if (!error) {
                write_lock_irq(&swapper_space.tree_lock);
                error = radix_tree_insert(&swapper_space.page_tree,
                                                entry.val, page);
                if (!error) {
                        page_cache_get(page);
                        SetPageSwapCache(page);
                        set_page_private(page, entry.val);
                        total_swapcache_pages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
                }
                write_unlock_irq(&swapper_space.tree_lock);
                radix_tree_preload_end();
        }
        return error;
}

static int add_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int error;

        BUG_ON(PageLocked(page));
        if (!swap_duplicate(entry)) {
                INC_CACHE_INFO(noent_race);
                return -ENOENT;
        }
        SetPageLocked(page);
        error = __add_to_swap_cache(page, entry, GFP_KERNEL);
        /*
         * Anon pages are already on the LRU; we don't run lru_cache_add here.
         */
        if (error) {
                ClearPageLocked(page);
                swap_free(entry);
                if (error == -EEXIST)
                        INC_CACHE_INFO(exist_race);
                return error;
        }
        INC_CACHE_INFO(add_total);
        return 0;
}
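
/*
 * Illustrative caller pattern (a sketch, not part of this file): the
 * error returns above are what read_swap_cache_async() below keys off.
 * -EEXIST means another task already brought the page in, so the caller
 * can simply retry the cache lookup; -ENOENT means the swap entry was
 * freed under us, so there is nothing left to read:
 *
 *      err = add_to_swap_cache(new_page, entry);
 *      if (err == -EEXIST)
 *              goto lookup_again;      // hypothetical retry label
 *      if (err == -ENOENT)
 *              return NULL;            // entry no longer in use
 */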

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache, with
 * the caller holding swapper_space.tree_lock.
 */
void __delete_from_swap_cache(struct page *page)
{
        BUG_ON(!PageLocked(page));
        BUG_ON(!PageSwapCache(page));
        BUG_ON(PageWriteback(page));
        BUG_ON(PagePrivate(page));

        radix_tree_delete(&swapper_space.page_tree, page_private(page));
        set_page_private(page, 0);
        ClearPageSwapCache(page);
        total_swapcache_pages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @gfp_mask: memory allocation flags
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, gfp_t gfp_mask)
{
        swp_entry_t entry;
        int err;

        BUG_ON(!PageLocked(page));

        for (;;) {
                entry = get_swap_page();
                if (!entry.val)
                        return 0;

                /*
                 * Radix-tree node allocations from PF_MEMALLOC contexts could
                 * completely exhaust the page allocator. __GFP_NOMEMALLOC
                 * stops emergency reserves from being allocated.
                 *
                 * TODO: this could cause a theoretical memory reclaim
                 * deadlock in the swap out path.
                 */
                /*
                 * Add it to the swap cache and mark it dirty
                 */
                err = __add_to_swap_cache(page, entry,
                                gfp_mask|__GFP_NOMEMALLOC|__GFP_NOWARN);

                switch (err) {
                case 0:                         /* Success */
                        SetPageUptodate(page);
                        SetPageDirty(page);
                        INC_CACHE_INFO(add_total);
                        return 1;
                case -EEXIST:
                        /* Raced with "speculative" read_swap_cache_async */
                        INC_CACHE_INFO(exist_race);
                        swap_free(entry);
                        continue;
                default:
                        /* -ENOMEM radix-tree allocation failure */
                        swap_free(entry);
                        return 0;
                }
        }
}
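
/*
 * Illustrative use (a sketch under the assumption that page reclaim is
 * the main caller, as in shrink_page_list()): an anonymous page that
 * has no swap slot yet is given one here before it can be paged out,
 * and reclaim keeps the page on its active list if that fails:
 *
 *      if (PageAnon(page) && !PageSwapCache(page))
 *              if (!add_to_swap(page, GFP_ATOMIC))
 *                      goto activate_locked;   // hypothetical label
 */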

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry;

        entry.val = page_private(page);

        write_lock_irq(&swapper_space.tree_lock);
        __delete_from_swap_cache(page);
        write_unlock_irq(&swapper_space.tree_lock);

        swap_free(entry);
        page_cache_release(page);
}

/*
 * Strange swizzling function only for use by shmem_writepage
 */
int move_to_swap_cache(struct page *page, swp_entry_t entry)
{
        int err = __add_to_swap_cache(page, entry, GFP_ATOMIC);
        if (!err) {
                remove_from_page_cache(page);
                page_cache_release(page);       /* pagecache ref */
                if (!swap_duplicate(entry))
                        BUG();
                SetPageDirty(page);
                INC_CACHE_INFO(add_total);
        } else if (err == -EEXIST)
                INC_CACHE_INFO(exist_race);
        return err;
}

/*
 * Strange swizzling function for shmem_getpage (and shmem_unuse)
 */
int move_from_swap_cache(struct page *page, unsigned long index,
                struct address_space *mapping)
{
        int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
        if (!err) {
                delete_from_swap_cache(page);
                /* shift page from clean_pages to dirty_pages list */
                ClearPageDirty(page);
                set_page_dirty(page);
        }
        return err;
}
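
/*
 * The two "swizzling" helpers above are mirror images: shmem_writepage
 * uses move_to_swap_cache() to trade a tmpfs page's pagecache slot for
 * a swap-cache slot, and shmem_getpage/shmem_unuse later use
 * move_from_swap_cache() to trade it back.  A rough sketch of the
 * write-out half (illustrative only; the real shmem_writepage does
 * more bookkeeping around the swap entry):
 *
 *      swp_entry_t swap = get_swap_page();
 *      if (swap.val && move_to_swap_cache(page, swap) == 0)
 *              return swap_writepage(page, wbc);
 */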

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * remove_exclusive_swap_page() _with_ the lock.
 * - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !TestSetPageLocked(page)) {
                remove_exclusive_swap_page(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;

        lru_add_drain();
        while (nr) {
                int todo = min(nr, PAGEVEC_SIZE);
                int i;

                for (i = 0; i < todo; i++)
                        free_swap_cache(pagep[i]);
                release_pages(pagep, todo, 0);
                pagep += todo;
                nr -= todo;
        }
}
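
/*
 * Batching note (illustrative): release_pages() is handed at most
 * PAGEVEC_SIZE pages per pass, so the work done under the zone LRU
 * lock inside it stays bounded rather than scaling with the caller's
 * array.  Assuming PAGEVEC_SIZE is 14, a call with nr == 20 makes two
 * passes: one over 14 pages, then one over the remaining 6.
 */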

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
        struct page *page;

        page = find_get_page(&swapper_space, entry.val);

        if (page)
                INC_CACHE_INFO(find_success);

        INC_CACHE_INFO(find_total);
        return page;
}
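
/*
 * Typical caller pattern (an illustrative sketch; the real fault path
 * goes through swap readahead, which calls read_swap_cache_async()
 * below for each entry in a cluster): look the entry up first, and
 * only fall back to I/O on a cache miss:
 *
 *      page = lookup_swap_cache(entry);
 *      if (!page)
 *              page = read_swap_cache_async(entry, vma, address);
 */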

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry,
                        struct vm_area_struct *vma, unsigned long addr)
{
        struct page *found_page, *new_page = NULL;
        int err;

        do {
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                found_page = find_get_page(&swapper_space, entry.val);
                if (found_page)
                        break;

                /*
                 * Get a new page to read into from swap.
                 */
                if (!new_page) {
                        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
                                                                vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
                }

                /*
                 * Associate the page with swap entry in the swap cache.
                 * May fail (-ENOENT) if swap entry has been freed since
                 * our caller observed it.  May fail (-EEXIST) if there
                 * is already a page associated with this entry in the
                 * swap cache: added by a racing read_swap_cache_async,
                 * or by try_to_swap_out (or shmem_writepage) re-using
                 * the just freed swap entry for an existing page.
                 * May fail (-ENOMEM) if radix-tree node allocation failed.
                 */
                err = add_to_swap_cache(new_page, entry);
                if (!err) {
                        /*
                         * Initiate read into locked page and return.
                         */
                        lru_cache_add_active(new_page);
                        swap_readpage(NULL, new_page);
                        return new_page;
                }
        } while (err != -ENOENT && err != -ENOMEM);

        if (new_page)
                page_cache_release(new_page);
        return found_page;
}
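
/*
 * Illustrative readahead loop (a sketch of how a caller such as
 * swapin_readahead() might drive the function above; the offset and
 * cluster-size handling are simplified here):
 *
 *      for (offset = start; offset < start + num; offset++) {
 *              page = read_swap_cache_async(swp_entry(type, offset),
 *                                              vma, addr);
 *              if (!page)
 *                      break;
 *              page_cache_release(page);       // readahead only, drop ref
 *      }
 */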