Commit | Line | Data |
---|---|---|
25761b6e | 1 | /* |
96bc7aec | 2 | * linux/kernel/power/snapshot.c |
25761b6e | 3 | * |
96bc7aec | 4 | * This file provide system snapshot/restore functionality. |
25761b6e RW | 5 | * |
6 | * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz> | |
7 | * | |
8 | * This file is released under the GPLv2, and is based on swsusp.c. | |
9 | * | |
10 | */ | |
11 | ||
12 | ||
f577eb30 | 13 | #include <linux/version.h> |
25761b6e RW | 14 | #include <linux/module.h> |
15 | #include <linux/mm.h> | |
16 | #include <linux/suspend.h> | |
17 | #include <linux/smp_lock.h> | |
25761b6e | 18 | #include <linux/delay.h> |
25761b6e | 19 | #include <linux/bitops.h> |
25761b6e | 20 | #include <linux/spinlock.h> |
25761b6e | 21 | #include <linux/kernel.h> |
25761b6e RW | 22 | #include <linux/pm.h> |
23 | #include <linux/device.h> |
25761b6e RW | 24 | #include <linux/bootmem.h> |
25 | #include <linux/syscalls.h> | |
26 | #include <linux/console.h> | |
27 | #include <linux/highmem.h> | |
25761b6e RW | 28 | |
29 | #include <asm/uaccess.h> | |
30 | #include <asm/mmu_context.h> | |
31 | #include <asm/pgtable.h> | |
32 | #include <asm/tlbflush.h> | |
33 | #include <asm/io.h> | |
34 | ||
25761b6e RW | 35 | #include "power.h" |
36 | ||
75534b50 RW | 37 | /* List of PBEs used for creating and restoring the suspend image */ |
38 | struct pbe *restore_pblist; | |
39 | ||
f577eb30 RW | 40 | static unsigned int nr_copy_pages; |
41 | static unsigned int nr_meta_pages; | |
940864dd | 42 | static void *buffer; |
7088a5c0 | 43 | |
25761b6e | 44 | #ifdef CONFIG_HIGHMEM |
3448097f | 45 | unsigned int count_highmem_pages(void) |
72a97e08 RW | 46 | { |
47 | struct zone *zone; | |
48 | unsigned long zone_pfn; | |
49 | unsigned int n = 0; | |
50 | ||
51 | for_each_zone (zone) | |
52 | if (is_highmem(zone)) { | |
53 | mark_free_pages(zone); | |
54 | for (zone_pfn = 0; zone_pfn < zone->spanned_pages; zone_pfn++) { | |
55 | struct page *page; | |
56 | unsigned long pfn = zone_pfn + zone->zone_start_pfn; | |
57 | if (!pfn_valid(pfn)) | |
58 | continue; | |
59 | page = pfn_to_page(pfn); | |
60 | if (PageReserved(page)) | |
61 | continue; | |
62 | if (PageNosaveFree(page)) | |
63 | continue; | |
64 | n++; | |
65 | } | |
66 | } | |
67 | return n; | |
68 | } | |
69 | ||
25761b6e RW | 70 | struct highmem_page { |
71 | char *data; | |
72 | struct page *page; | |
73 | struct highmem_page *next; | |
74 | }; | |
75 | ||
76 | static struct highmem_page *highmem_copy; | |
77 | ||
78 | static int save_highmem_zone(struct zone *zone) | |
79 | { | |
80 | unsigned long zone_pfn; | |
81 | mark_free_pages(zone); | |
82 | for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) { | |
83 | struct page *page; | |
84 | struct highmem_page *save; | |
85 | void *kaddr; | |
86 | unsigned long pfn = zone_pfn + zone->zone_start_pfn; | |
87 | ||
ce6ed29f | 88 | if (!(pfn%10000)) |
25761b6e RW | 89 | printk("."); |
90 | if (!pfn_valid(pfn)) | |
91 | continue; | |
92 | page = pfn_to_page(pfn); | |
93 | /* | |
94 | * This condition results from rvmalloc() sans vmalloc_32() | |
95 | * and architectural memory reservations. This should be | |
96 | * corrected eventually when the cases giving rise to this | |
97 | * are better understood. | |
98 | */ | |
c8adb494 | 99 | if (PageReserved(page)) |
25761b6e | 100 | continue; |
25761b6e RW | 101 | BUG_ON(PageNosave(page)); |
102 | if (PageNosaveFree(page)) | |
103 | continue; | |
104 | save = kmalloc(sizeof(struct highmem_page), GFP_ATOMIC); | |
105 | if (!save) | |
106 | return -ENOMEM; | |
107 | save->next = highmem_copy; | |
108 | save->page = page; | |
109 | save->data = (void *) get_zeroed_page(GFP_ATOMIC); | |
110 | if (!save->data) { | |
111 | kfree(save); | |
112 | return -ENOMEM; | |
113 | } | |
114 | kaddr = kmap_atomic(page, KM_USER0); | |
115 | memcpy(save->data, kaddr, PAGE_SIZE); | |
116 | kunmap_atomic(kaddr, KM_USER0); | |
117 | highmem_copy = save; | |
118 | } | |
119 | return 0; | |
120 | } | |
25761b6e | 121 | |
3448097f | 122 | int save_highmem(void) |
25761b6e | 123 | { |
25761b6e RW | 124 | struct zone *zone; |
125 | int res = 0; | |
126 | ||
ce6ed29f | 127 | pr_debug("swsusp: Saving Highmem"); |
e4e4d665 | 128 | drain_local_pages(); |
25761b6e RW | 129 | for_each_zone (zone) { |
130 | if (is_highmem(zone)) | |
131 | res = save_highmem_zone(zone); | |
132 | if (res) | |
133 | return res; | |
134 | } | |
ce6ed29f | 135 | printk("\n"); |
25761b6e RW | 136 | return 0; |
137 | } | |
138 | ||
3448097f | 139 | int restore_highmem(void) |
25761b6e | 140 | { |
25761b6e RW | 141 | printk("swsusp: Restoring Highmem\n"); |
142 | while (highmem_copy) { | |
143 | struct highmem_page *save = highmem_copy; | |
144 | void *kaddr; | |
145 | highmem_copy = save->next; | |
146 | ||
147 | kaddr = kmap_atomic(save->page, KM_USER0); | |
148 | memcpy(kaddr, save->data, PAGE_SIZE); | |
149 | kunmap_atomic(kaddr, KM_USER0); | |
150 | free_page((long) save->data); | |
151 | kfree(save); | |
152 | } | |
25761b6e RW | 153 | return 0; |
154 | } | |
ce4ab001 | 155 | #else |
7bff24e2 AB | 156 | static inline unsigned int count_highmem_pages(void) {return 0;} |
157 | static inline int save_highmem(void) {return 0;} | |
158 | static inline int restore_highmem(void) {return 0;} | |
0fbeb5a4 | 159 | #endif |
25761b6e | 160 | |
f6143aa6 RW | 161 | /** |
162 | * @safe_needed - on resume, for storing the PBE list and the image, |
163 | * we can only use memory pages that do not conflict with the pages |
164 | * that were in use before the suspend. |
165 | * |
166 | * The unsafe pages are marked with the PG_nosave_free flag |
167 | * and counted using the allocated_unsafe_pages variable. |
168 | */ |
169 | ||
0bcd888d RW | 170 | #define PG_ANY 0 |
171 | #define PG_SAFE 1 | |
172 | #define PG_UNSAFE_CLEAR 1 | |
173 | #define PG_UNSAFE_KEEP 0 | |
174 | ||
940864dd | 175 | static unsigned int allocated_unsafe_pages; |
f6143aa6 RW | 176 | |
177 | static void *alloc_image_page(gfp_t gfp_mask, int safe_needed) | |
178 | { | |
179 | void *res; | |
180 | ||
181 | res = (void *)get_zeroed_page(gfp_mask); | |
182 | if (safe_needed) | |
183 | while (res && PageNosaveFree(virt_to_page(res))) { | |
184 | /* The page is unsafe, mark it for swsusp_free() */ | |
185 | SetPageNosave(virt_to_page(res)); | |
940864dd | 186 | allocated_unsafe_pages++; |
f6143aa6 RW | 187 | res = (void *)get_zeroed_page(gfp_mask); |
188 | } | |
189 | if (res) { | |
190 | SetPageNosave(virt_to_page(res)); | |
191 | SetPageNosaveFree(virt_to_page(res)); | |
192 | } | |
193 | return res; | |
194 | } | |
195 | ||
196 | unsigned long get_safe_page(gfp_t gfp_mask) | |
197 | { | |
0bcd888d | 198 | return (unsigned long)alloc_image_page(gfp_mask, PG_SAFE); |
f6143aa6 RW | 199 | } |
200 | ||
201 | /** | |
202 | * free_image_page - free page represented by @addr, allocated with | |
203 | * alloc_image_page (page flags set by it must be cleared) | |
204 | */ | |
205 | ||
206 | static inline void free_image_page(void *addr, int clear_nosave_free) | |
207 | { | |
208 | ClearPageNosave(virt_to_page(addr)); | |
209 | if (clear_nosave_free) | |
210 | ClearPageNosaveFree(virt_to_page(addr)); | |
211 | free_page((unsigned long)addr); | |
212 | } | |
213 | ||
b788db79 RW | 214 | /* struct linked_page is used to build chains of pages */ |
215 | ||
216 | #define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *)) | |
217 | ||
218 | struct linked_page { | |
219 | struct linked_page *next; | |
220 | char data[LINKED_PAGE_DATA_SIZE]; | |
221 | } __attribute__((packed)); | |
222 | ||
223 | static inline void | |
224 | free_list_of_pages(struct linked_page *list, int clear_page_nosave) | |
225 | { | |
226 | while (list) { | |
227 | struct linked_page *lp = list->next; | |
228 | ||
229 | free_image_page(list, clear_page_nosave); | |
230 | list = lp; | |
231 | } | |
232 | } | |
233 | ||
234 | /** | |
235 | * struct chain_allocator is used for allocating small objects out of | |
236 | * a linked list of pages called 'the chain'. | |
237 | * | |
238 | * The chain grows each time when there is no room for a new object in | |
239 | * the current page. The allocated objects cannot be freed individually. | |
240 | * It is only possible to free them all at once, by freeing the entire | |
241 | * chain. | |
242 | * | |
243 | * NOTE: The chain allocator may be inefficient if the allocated objects | |
244 | * are not much smaller than PAGE_SIZE. | |
245 | */ | |
246 | ||
247 | struct chain_allocator { | |
248 | struct linked_page *chain; /* the chain */ | |
249 | unsigned int used_space; /* total size of objects allocated out | |
250 | * of the current page | |
251 | */ | |
252 | gfp_t gfp_mask; /* mask for allocating pages */ | |
253 | int safe_needed; /* if set, only "safe" pages are allocated */ | |
254 | }; | |
255 | ||
256 | static void | |
257 | chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed) | |
258 | { | |
259 | ca->chain = NULL; | |
260 | ca->used_space = LINKED_PAGE_DATA_SIZE; | |
261 | ca->gfp_mask = gfp_mask; | |
262 | ca->safe_needed = safe_needed; | |
263 | } | |
264 | ||
265 | static void *chain_alloc(struct chain_allocator *ca, unsigned int size) | |
266 | { | |
267 | void *ret; | |
268 | ||
269 | if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) { | |
270 | struct linked_page *lp; | |
271 | ||
272 | lp = alloc_image_page(ca->gfp_mask, ca->safe_needed); | |
273 | if (!lp) | |
274 | return NULL; | |
275 | ||
276 | lp->next = ca->chain; | |
277 | ca->chain = lp; | |
278 | ca->used_space = 0; | |
279 | } | |
280 | ret = ca->chain->data + ca->used_space; | |
281 | ca->used_space += size; | |
282 | return ret; | |
283 | } | |
284 | ||
285 | static void chain_free(struct chain_allocator *ca, int clear_page_nosave) | |
286 | { | |
287 | free_list_of_pages(ca->chain, clear_page_nosave); | |
288 | memset(ca, 0, sizeof(struct chain_allocator)); | |
289 | } | |
290 | ||
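The chain allocator above is exercised later in this file by create_bm_block_list() and create_zone_bm_list(). Below is a minimal usage sketch, assuming the context of this file (the chain_init()/chain_alloc()/chain_free() helpers and the PG_* constants defined above); `struct my_obj` and `build_list()` are hypothetical names used only for illustration.

```c
/* Sketch: carve many small objects out of a chain of pages and free
 * them all at once.  Not part of snapshot.c; illustration only. */
struct my_obj {
	struct my_obj *next;
	unsigned long payload;
};

static struct my_obj *build_list(unsigned int n, struct chain_allocator *ca)
{
	struct my_obj *head = NULL;

	while (n-- > 0) {
		struct my_obj *obj = chain_alloc(ca, sizeof(struct my_obj));

		if (!obj)
			return NULL;	/* caller frees the whole chain */
		obj->next = head;
		head = obj;
	}
	return head;
}

/* Typical call sequence (mirrors memory_bm_create() below):
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_ATOMIC, PG_ANY);
 *	head = build_list(128, &ca);
 *	...
 *	chain_free(&ca, PG_UNSAFE_CLEAR);   /\* frees every object at once *\/
 */
```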
291 | /** | |
292 | * Data types related to memory bitmaps. | |
293 | * | |
294 | * Memory bitmap is a structure consisting of many linked lists of |
295 | * objects. The main list's elements are of type struct zone_bitmap |
296 | * and each of them corresponds to one zone. For each zone bitmap |
297 | * object there is a list of objects of type struct bm_block that |
298 | * each represent a block of bit chunks in which information is |
299 | * stored. |
300 | * | |
301 | * struct memory_bitmap contains a pointer to the main list of zone | |
302 | * bitmap objects, a struct bm_position used for browsing the bitmap, | |
303 | * and a pointer to the list of pages used for allocating all of the | |
304 | * zone bitmap objects and bitmap block objects. | |
305 | * | |
306 | * NOTE: It has to be possible to lay out the bitmap in memory | |
307 | * using only allocations of order 0. Additionally, the bitmap is | |
308 | * designed to work with arbitrary number of zones (this is over the | |
309 | * top for now, but let's avoid making unnecessary assumptions ;-). | |
310 | * | |
311 | * struct zone_bitmap contains a pointer to a list of bitmap block | |
312 | * objects and a pointer to the bitmap block object that has been | |
313 | * most recently used for setting bits. Additionally, it contains the | |
314 | * pfns that correspond to the start and end of the represented zone. | |
315 | * | |
316 | * struct bm_block contains a pointer to the memory page in which | |
317 | * information is stored (in the form of a block of bit chunks | |
318 | * of type unsigned long each). It also contains the pfns that | |
319 | * correspond to the start and end of the represented memory area and | |
320 | * the number of bit chunks in the block. | |
321 | * | |
322 | * NOTE: Memory bitmaps are used for two types of operations only: | |
323 | * "set a bit" and "find the next bit set". Moreover, the searching | |
324 | * is always carried out after all of the "set a bit" operations | |
325 | * on given bitmap. | |
326 | */ | |
327 | ||
328 | #define BM_END_OF_MAP (~0UL) | |
329 | ||
330 | #define BM_CHUNKS_PER_BLOCK (PAGE_SIZE / sizeof(long)) | |
331 | #define BM_BITS_PER_CHUNK (sizeof(long) << 3) | |
332 | #define BM_BITS_PER_BLOCK (PAGE_SIZE << 3) | |
333 | ||
334 | struct bm_block { | |
335 | struct bm_block *next; /* next element of the list */ | |
336 | unsigned long start_pfn; /* pfn represented by the first bit */ | |
337 | unsigned long end_pfn; /* pfn represented by the last bit plus 1 */ | |
338 | unsigned int size; /* number of bit chunks */ | |
339 | unsigned long *data; /* chunks of bits representing pages */ | |
340 | }; | |
341 | ||
342 | struct zone_bitmap { | |
343 | struct zone_bitmap *next; /* next element of the list */ | |
344 | unsigned long start_pfn; /* minimal pfn in this zone */ | |
345 | unsigned long end_pfn; /* maximal pfn in this zone plus 1 */ | |
346 | struct bm_block *bm_blocks; /* list of bitmap blocks */ | |
347 | struct bm_block *cur_block; /* recently used bitmap block */ | |
348 | }; | |
349 | ||
350 | /* struct bm_position is used for browsing memory bitmaps */ |
351 | ||
352 | struct bm_position { | |
353 | struct zone_bitmap *zone_bm; | |
354 | struct bm_block *block; | |
355 | int chunk; | |
356 | int bit; | |
357 | }; | |
358 | ||
359 | struct memory_bitmap { | |
360 | struct zone_bitmap *zone_bm_list; /* list of zone bitmaps */ | |
361 | struct linked_page *p_list; /* list of pages used to store zone | |
362 | * bitmap objects and bitmap block | |
363 | * objects | |
364 | */ | |
365 | struct bm_position cur; /* most recently used bit position */ | |
366 | }; | |
367 | ||
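To give a rough sense of scale for the constants and structures above (an illustration, not part of the file, and architecture dependent), the sketch below assumes 4 KB pages and 64-bit longs; the chunk/bit arithmetic shown is exactly what memory_bm_set_bit() performs further down.

```c
/* Hypothetical worked example of the bitmap geometry, assuming
 * PAGE_SIZE == 4096 and sizeof(long) == 8.
 *
 *	BM_BITS_PER_CHUNK   = 8 * sizeof(long)       =    64
 *	BM_CHUNKS_PER_BLOCK = PAGE_SIZE / sizeof(long) =  512
 *	BM_BITS_PER_BLOCK   = PAGE_SIZE * 8           = 32768
 *
 * One bm_block data page therefore covers 32768 pfns (128 MB of RAM),
 * and a pfn inside a block is located as:
 *
 *	chunk = (pfn - bb->start_pfn) / BM_BITS_PER_CHUNK;
 *	bit   = (pfn - bb->start_pfn) % BM_BITS_PER_CHUNK;
 */
```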
368 | /* Functions that operate on memory bitmaps */ | |
369 | ||
370 | static inline void memory_bm_reset_chunk(struct memory_bitmap *bm) | |
371 | { | |
372 | bm->cur.chunk = 0; | |
373 | bm->cur.bit = -1; | |
374 | } | |
375 | ||
376 | static void memory_bm_position_reset(struct memory_bitmap *bm) | |
377 | { | |
378 | struct zone_bitmap *zone_bm; | |
379 | ||
380 | zone_bm = bm->zone_bm_list; | |
381 | bm->cur.zone_bm = zone_bm; | |
382 | bm->cur.block = zone_bm->bm_blocks; | |
383 | memory_bm_reset_chunk(bm); | |
384 | } | |
385 | ||
386 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free); | |
387 | ||
388 | /** | |
389 | * create_bm_block_list - create a list of block bitmap objects | |
390 | */ | |
391 | ||
392 | static inline struct bm_block * | |
393 | create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca) | |
394 | { | |
395 | struct bm_block *bblist = NULL; | |
396 | ||
397 | while (nr_blocks-- > 0) { | |
398 | struct bm_block *bb; | |
399 | ||
400 | bb = chain_alloc(ca, sizeof(struct bm_block)); | |
401 | if (!bb) | |
402 | return NULL; | |
403 | ||
404 | bb->next = bblist; | |
405 | bblist = bb; | |
406 | } | |
407 | return bblist; | |
408 | } | |
409 | ||
410 | /** | |
411 | * create_zone_bm_list - create a list of zone bitmap objects | |
412 | */ | |
413 | ||
414 | static inline struct zone_bitmap * | |
415 | create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca) | |
416 | { | |
417 | struct zone_bitmap *zbmlist = NULL; | |
418 | ||
419 | while (nr_zones-- > 0) { | |
420 | struct zone_bitmap *zbm; | |
421 | ||
422 | zbm = chain_alloc(ca, sizeof(struct zone_bitmap)); | |
423 | if (!zbm) | |
424 | return NULL; | |
425 | ||
426 | zbm->next = zbmlist; | |
427 | zbmlist = zbm; | |
428 | } | |
429 | return zbmlist; | |
430 | } | |
431 | ||
432 | /** | |
433 | * memory_bm_create - allocate memory for a memory bitmap | |
434 | */ | |
435 | ||
436 | static int | |
437 | memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed) | |
438 | { | |
439 | struct chain_allocator ca; | |
440 | struct zone *zone; | |
441 | struct zone_bitmap *zone_bm; | |
442 | struct bm_block *bb; | |
443 | unsigned int nr; | |
444 | ||
445 | chain_init(&ca, gfp_mask, safe_needed); | |
446 | ||
447 | /* Compute the number of zones */ | |
448 | nr = 0; | |
449 | for_each_zone (zone) | |
450 | if (populated_zone(zone) && !is_highmem(zone)) | |
451 | nr++; | |
452 | ||
453 | /* Allocate the list of zones bitmap objects */ | |
454 | zone_bm = create_zone_bm_list(nr, &ca); | |
455 | bm->zone_bm_list = zone_bm; | |
456 | if (!zone_bm) { | |
457 | chain_free(&ca, PG_UNSAFE_CLEAR); | |
458 | return -ENOMEM; | |
459 | } | |
460 | ||
461 | /* Initialize the zone bitmap objects */ | |
462 | for_each_zone (zone) { | |
463 | unsigned long pfn; | |
464 | ||
465 | if (!populated_zone(zone) || is_highmem(zone)) | |
466 | continue; | |
467 | ||
468 | zone_bm->start_pfn = zone->zone_start_pfn; | |
469 | zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages; | |
470 | /* Allocate the list of bitmap block objects */ | |
471 | nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); | |
472 | bb = create_bm_block_list(nr, &ca); | |
473 | zone_bm->bm_blocks = bb; | |
474 | zone_bm->cur_block = bb; | |
475 | if (!bb) | |
476 | goto Free; | |
477 | ||
478 | nr = zone->spanned_pages; | |
479 | pfn = zone->zone_start_pfn; | |
480 | /* Initialize the bitmap block objects */ | |
481 | while (bb) { | |
482 | unsigned long *ptr; | |
483 | ||
484 | ptr = alloc_image_page(gfp_mask, safe_needed); | |
485 | bb->data = ptr; | |
486 | if (!ptr) | |
487 | goto Free; | |
488 | ||
489 | bb->start_pfn = pfn; | |
490 | if (nr >= BM_BITS_PER_BLOCK) { | |
491 | pfn += BM_BITS_PER_BLOCK; | |
492 | bb->size = BM_CHUNKS_PER_BLOCK; | |
493 | nr -= BM_BITS_PER_BLOCK; | |
494 | } else { | |
495 | /* This is executed only once in the loop */ | |
496 | pfn += nr; | |
497 | bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK); | |
498 | } | |
499 | bb->end_pfn = pfn; | |
500 | bb = bb->next; | |
501 | } | |
502 | zone_bm = zone_bm->next; | |
503 | } | |
504 | bm->p_list = ca.chain; | |
505 | memory_bm_position_reset(bm); | |
506 | return 0; | |
507 | ||
508 | Free: | |
509 | bm->p_list = ca.chain; | |
510 | memory_bm_free(bm, PG_UNSAFE_CLEAR); | |
511 | return -ENOMEM; | |
512 | } | |
513 | ||
514 | /** | |
515 | * memory_bm_free - free memory occupied by the memory bitmap @bm | |
516 | */ | |
517 | ||
518 | static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free) | |
519 | { | |
520 | struct zone_bitmap *zone_bm; | |
521 | ||
522 | /* Free the list of bit blocks for each zone_bitmap object */ | |
523 | zone_bm = bm->zone_bm_list; | |
524 | while (zone_bm) { | |
525 | struct bm_block *bb; | |
526 | ||
527 | bb = zone_bm->bm_blocks; | |
528 | while (bb) { | |
529 | if (bb->data) | |
530 | free_image_page(bb->data, clear_nosave_free); | |
531 | bb = bb->next; | |
532 | } | |
533 | zone_bm = zone_bm->next; | |
534 | } | |
535 | free_list_of_pages(bm->p_list, clear_nosave_free); | |
536 | bm->zone_bm_list = NULL; | |
537 | } | |
538 | ||
539 | /** | |
540 | * memory_bm_set_bit - set the bit in the bitmap @bm that corresponds | |
541 | * to given pfn. The cur_zone_bm member of @bm and the cur_block member | |
542 | * of @bm->cur_zone_bm are updated. | |
543 | * | |
544 | * If the bit cannot be set, the function returns -EINVAL . | |
545 | */ | |
546 | ||
547 | static int | |
548 | memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) | |
549 | { | |
550 | struct zone_bitmap *zone_bm; | |
551 | struct bm_block *bb; | |
552 | ||
553 | /* Check if the pfn is from the current zone */ | |
554 | zone_bm = bm->cur.zone_bm; | |
555 | if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { | |
556 | zone_bm = bm->zone_bm_list; | |
557 | /* We don't assume that the zones are sorted by pfns */ | |
558 | while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { | |
559 | zone_bm = zone_bm->next; | |
560 | if (unlikely(!zone_bm)) | |
561 | return -EINVAL; | |
562 | } | |
563 | bm->cur.zone_bm = zone_bm; | |
564 | } | |
565 | /* Check if the pfn corresponds to the current bitmap block */ | |
566 | bb = zone_bm->cur_block; | |
567 | if (pfn < bb->start_pfn) | |
568 | bb = zone_bm->bm_blocks; | |
569 | ||
570 | while (pfn >= bb->end_pfn) { | |
571 | bb = bb->next; | |
572 | if (unlikely(!bb)) | |
573 | return -EINVAL; | |
574 | } | |
575 | zone_bm->cur_block = bb; | |
576 | pfn -= bb->start_pfn; | |
577 | set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK); | |
578 | return 0; | |
579 | } | |
580 | ||
581 | /* Two auxiliary functions for memory_bm_next_pfn */ | |
582 | ||
583 | /* Find the first set bit in the given chunk, if there is one */ | |
584 | ||
585 | static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p) | |
586 | { | |
587 | bit++; | |
588 | while (bit < BM_BITS_PER_CHUNK) { | |
589 | if (test_bit(bit, chunk_p)) | |
590 | return bit; | |
591 | ||
592 | bit++; | |
593 | } | |
594 | return -1; | |
595 | } | |
596 | ||
597 | /* Find a chunk containing some bits set in given block of bits */ | |
598 | ||
599 | static inline int next_chunk_in_block(int n, struct bm_block *bb) | |
600 | { | |
601 | n++; | |
602 | while (n < bb->size) { | |
603 | if (bb->data[n]) | |
604 | return n; | |
605 | ||
606 | n++; | |
607 | } | |
608 | return -1; | |
609 | } | |
610 | ||
611 | /** | |
612 | * memory_bm_next_pfn - find the pfn that corresponds to the next set bit | |
613 | * in the bitmap @bm. If the pfn cannot be found, BM_END_OF_MAP is | |
614 | * returned. | |
615 | * | |
616 | * It is required to run memory_bm_position_reset() before the first call to | |
617 | * this function. | |
618 | */ | |
619 | ||
620 | static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm) | |
621 | { | |
622 | struct zone_bitmap *zone_bm; | |
623 | struct bm_block *bb; | |
624 | int chunk; | |
625 | int bit; | |
626 | ||
627 | do { | |
628 | bb = bm->cur.block; | |
629 | do { | |
630 | chunk = bm->cur.chunk; | |
631 | bit = bm->cur.bit; | |
632 | do { | |
633 | bit = next_bit_in_chunk(bit, bb->data + chunk); | |
634 | if (bit >= 0) | |
635 | goto Return_pfn; | |
636 | ||
637 | chunk = next_chunk_in_block(chunk, bb); | |
638 | bit = -1; | |
639 | } while (chunk >= 0); | |
640 | bb = bb->next; | |
641 | bm->cur.block = bb; | |
642 | memory_bm_reset_chunk(bm); | |
643 | } while (bb); | |
644 | zone_bm = bm->cur.zone_bm->next; | |
645 | if (zone_bm) { | |
646 | bm->cur.zone_bm = zone_bm; | |
647 | bm->cur.block = zone_bm->bm_blocks; | |
648 | memory_bm_reset_chunk(bm); | |
649 | } | |
650 | } while (zone_bm); | |
651 | memory_bm_position_reset(bm); | |
652 | return BM_END_OF_MAP; | |
653 | ||
654 | Return_pfn: | |
655 | bm->cur.chunk = chunk; | |
656 | bm->cur.bit = bit; | |
657 | return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit; | |
658 | } | |
659 | ||
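Every consumer of these bitmaps in this file (copy_data_pages(), mark_unsafe_pages(), duplicate_memory_bitmap()) follows the same idiom: reset the position, then pull pfns until BM_END_OF_MAP. A minimal sketch of that loop, using only the helpers defined above; process_pfn() is a hypothetical per-page callback.

```c
/* Sketch of the standard bitmap-walking loop; illustration only. */
static void walk_bitmap(struct memory_bitmap *bm,
			void (*process_pfn)(unsigned long pfn))
{
	unsigned long pfn;

	memory_bm_position_reset(bm);	/* required before the first call */
	for (;;) {
		pfn = memory_bm_next_pfn(bm);
		if (pfn == BM_END_OF_MAP)
			break;
		process_pfn(pfn);
	}
}
```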
660 | /** | |
661 | * snapshot_additional_pages - estimate the number of additional pages | |
662 | * that will be needed for setting up the suspend image data structures for |
663 | * the given zone (usually the returned value is greater than the exact number) |
664 | */ | |
665 | ||
666 | unsigned int snapshot_additional_pages(struct zone *zone) | |
667 | { | |
668 | unsigned int res; | |
669 | ||
670 | res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK); | |
671 | res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE); | |
672 | return res; | |
673 | } | |
674 | ||
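To make the estimate above concrete, here is a hypothetical calculation under the same 4 KB page / 64-bit assumption as earlier; the numbers are illustrative, not taken from the file.

```c
/* Hypothetical numbers for snapshot_additional_pages() on a zone
 * spanning 1 GB (262144 pages), assuming a 40-byte struct bm_block:
 *
 *	bitmap pages:      DIV_ROUND_UP(262144, 32768)  = 8
 *	descriptor bytes:  8 * sizeof(struct bm_block)  = 320
 *	descriptor pages:  DIV_ROUND_UP(320, PAGE_SIZE) = 1
 *
 * so the function would report 9 additional pages for such a zone,
 * slightly more than what is eventually used.
 */
```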
f6143aa6 RW | 675 | /** |
676 | * pfn_is_nosave - check if given pfn is in the 'nosave' section | |
677 | */ | |
678 | ||
ae83c5ee | 679 | static inline int pfn_is_nosave(unsigned long pfn) |
25761b6e RW | 680 | { |
681 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | |
682 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT; | |
683 | return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); | |
684 | } | |
685 | ||
686 | /** | |
687 | * saveable - Determine whether a page should be cloned or not. | |
688 | * @pfn: The page | |
689 | * | |
ae83c5ee RW | 690 | * We save a page if it isn't Nosave, is not in the range of pages |
691 | * statically defined as 'unsaveable', and is not |
692 | * part of a free chunk of pages. |
25761b6e RW | 693 | */ |
694 | ||
ae83c5ee | 695 | static struct page *saveable_page(unsigned long pfn) |
25761b6e | 696 | { |
de491861 | 697 | struct page *page; |
25761b6e RW |
698 | |
699 | if (!pfn_valid(pfn)) | |
ae83c5ee | 700 | return NULL; |
25761b6e RW |
701 | |
702 | page = pfn_to_page(pfn); | |
ae83c5ee | 703 | |
25761b6e | 704 | if (PageNosave(page)) |
ae83c5ee | 705 | return NULL; |
72a97e08 | 706 | if (PageReserved(page) && pfn_is_nosave(pfn)) |
ae83c5ee | 707 | return NULL; |
25761b6e | 708 | if (PageNosaveFree(page)) |
ae83c5ee | 709 | return NULL; |
25761b6e | 710 | |
ae83c5ee | 711 | return page; |
25761b6e RW |
712 | } |
713 | ||
72a97e08 | 714 | unsigned int count_data_pages(void) |
25761b6e RW |
715 | { |
716 | struct zone *zone; | |
ae83c5ee | 717 | unsigned long pfn, max_zone_pfn; |
dc19d507 | 718 | unsigned int n = 0; |
25761b6e | 719 | |
25761b6e RW |
720 | for_each_zone (zone) { |
721 | if (is_highmem(zone)) | |
722 | continue; | |
723 | mark_free_pages(zone); | |
ae83c5ee RW |
724 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
725 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | |
726 | n += !!saveable_page(pfn); | |
25761b6e | 727 | } |
a0f49651 | 728 | return n; |
25761b6e RW |
729 | } |
730 | ||
f623f0db RW |
731 | static inline void copy_data_page(long *dst, long *src) |
732 | { | |
733 | int n; | |
734 | ||
735 | /* copy_page and memcpy are not usable for copying task structs. */ | |
736 | for (n = PAGE_SIZE / sizeof(long); n; n--) | |
737 | *dst++ = *src++; | |
738 | } | |
739 | ||
b788db79 RW |
740 | static void |
741 | copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm) | |
25761b6e RW |
742 | { |
743 | struct zone *zone; | |
b788db79 | 744 | unsigned long pfn; |
25761b6e | 745 | |
25761b6e | 746 | for_each_zone (zone) { |
b788db79 RW |
747 | unsigned long max_zone_pfn; |
748 | ||
25761b6e RW |
749 | if (is_highmem(zone)) |
750 | continue; | |
b788db79 | 751 | |
25761b6e | 752 | mark_free_pages(zone); |
ae83c5ee | 753 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
b788db79 RW |
754 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
755 | if (saveable_page(pfn)) | |
756 | memory_bm_set_bit(orig_bm, pfn); | |
25761b6e | 757 | } |
b788db79 RW |
758 | memory_bm_position_reset(orig_bm); |
759 | memory_bm_position_reset(copy_bm); | |
760 | do { | |
761 | pfn = memory_bm_next_pfn(orig_bm); | |
762 | if (likely(pfn != BM_END_OF_MAP)) { | |
763 | struct page *page; | |
764 | void *src; | |
765 | ||
766 | page = pfn_to_page(pfn); | |
767 | src = page_address(page); | |
768 | page = pfn_to_page(memory_bm_next_pfn(copy_bm)); | |
769 | copy_data_page(page_address(page), src); | |
770 | } | |
771 | } while (pfn != BM_END_OF_MAP); | |
25761b6e RW |
772 | } |
773 | ||
25761b6e | 774 | /** |
940864dd | 775 | * swsusp_free - free pages allocated for the suspend. |
cd560bb2 | 776 | * |
940864dd RW | 777 | * Suspend pages are allocated before the atomic copy is made, so we |
778 | * need to release them after the resume. |
25761b6e RW | 779 | */ |
780 | ||
781 | void swsusp_free(void) | |
782 | { | |
783 | struct zone *zone; | |
ae83c5ee | 784 | unsigned long pfn, max_zone_pfn; |
25761b6e RW |
785 | |
786 | for_each_zone(zone) { | |
ae83c5ee RW |
787 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
788 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | |
789 | if (pfn_valid(pfn)) { | |
790 | struct page *page = pfn_to_page(pfn); | |
791 | ||
25761b6e RW |
792 | if (PageNosave(page) && PageNosaveFree(page)) { |
793 | ClearPageNosave(page); | |
794 | ClearPageNosaveFree(page); | |
795 | free_page((long) page_address(page)); | |
796 | } | |
797 | } | |
798 | } | |
f577eb30 RW |
799 | nr_copy_pages = 0; |
800 | nr_meta_pages = 0; | |
75534b50 | 801 | restore_pblist = NULL; |
6e1819d6 | 802 | buffer = NULL; |
25761b6e RW |
803 | } |
804 | ||
805 | ||
806 | /** | |
807 | * enough_free_mem - Make sure we have enough free memory for the snapshot. |
808 | * | |
809 | * Returns TRUE or FALSE after checking the number of available | |
810 | * free pages. | |
811 | */ | |
812 | ||
dc19d507 | 813 | static int enough_free_mem(unsigned int nr_pages) |
25761b6e | 814 | { |
e5e2fa78 | 815 | struct zone *zone; |
940864dd | 816 | unsigned int free = 0, meta = 0; |
e5e2fa78 RW |
817 | |
818 | for_each_zone (zone) | |
940864dd RW |
819 | if (!is_highmem(zone)) { |
820 | free += zone->free_pages; | |
821 | meta += snapshot_additional_pages(zone); | |
822 | } | |
823 | ||
824 | pr_debug("swsusp: pages needed: %u + %u + %u, available pages: %u\n", | |
825 | nr_pages, PAGES_FOR_IO, meta, free); | |
826 | ||
827 | return free > nr_pages + PAGES_FOR_IO + meta; | |
25761b6e RW |
828 | } |
829 | ||
b788db79 RW |
830 | static int |
831 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | |
832 | unsigned int nr_pages) | |
054bd4c1 | 833 | { |
b788db79 | 834 | int error; |
054bd4c1 | 835 | |
b788db79 RW |
836 | error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); |
837 | if (error) | |
838 | goto Free; | |
25761b6e | 839 | |
b788db79 RW |
840 | error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY); |
841 | if (error) | |
842 | goto Free; | |
25761b6e | 843 | |
b788db79 RW |
844 | while (nr_pages-- > 0) { |
845 | struct page *page = alloc_page(GFP_ATOMIC | __GFP_COLD); | |
846 | if (!page) | |
847 | goto Free; | |
25761b6e | 848 | |
b788db79 RW |
849 | SetPageNosave(page); |
850 | SetPageNosaveFree(page); | |
851 | memory_bm_set_bit(copy_bm, page_to_pfn(page)); | |
25761b6e | 852 | } |
b788db79 | 853 | return 0; |
25761b6e | 854 | |
b788db79 RW |
855 | Free: |
856 | swsusp_free(); | |
857 | return -ENOMEM; | |
25761b6e RW |
858 | } |
859 | ||
b788db79 RW | 860 | /* Memory bitmap used for marking saveable pages */ |
861 | static struct memory_bitmap orig_bm; | |
862 | /* Memory bitmap used for marking allocated pages that will contain the copies | |
863 | * of saveable pages | |
864 | */ | |
865 | static struct memory_bitmap copy_bm; | |
866 | ||
2e32a43e | 867 | asmlinkage int swsusp_save(void) |
25761b6e | 868 | { |
dc19d507 | 869 | unsigned int nr_pages; |
25761b6e RW |
870 | |
871 | pr_debug("swsusp: critical section: \n"); | |
25761b6e RW |
872 | |
873 | drain_local_pages(); | |
a0f49651 RW |
874 | nr_pages = count_data_pages(); |
875 | printk("swsusp: Need to copy %u pages\n", nr_pages); | |
25761b6e | 876 | |
a0f49651 | 877 | if (!enough_free_mem(nr_pages)) { |
25761b6e RW |
878 | printk(KERN_ERR "swsusp: Not enough free memory\n"); |
879 | return -ENOMEM; | |
880 | } | |
881 | ||
b788db79 | 882 | if (swsusp_alloc(&orig_bm, ©_bm, nr_pages)) |
a0f49651 | 883 | return -ENOMEM; |
25761b6e RW |
884 | |
885 | /* While allocating the suspend pagedir, new cold pages may appear. |
886 | * Kill them. | |
887 | */ | |
888 | drain_local_pages(); | |
b788db79 | 889 | copy_data_pages(©_bm, &orig_bm); |
25761b6e RW |
890 | |
891 | /* | |
892 | * End of critical section. From now on, we can write to memory, | |
893 | * but we should not touch disk. This specially means we must _not_ | |
894 | * touch swap space! Except we must write out our image of course. | |
895 | */ | |
896 | ||
a0f49651 | 897 | nr_copy_pages = nr_pages; |
f577eb30 | 898 | nr_meta_pages = (nr_pages * sizeof(long) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
a0f49651 RW |
899 | |
900 | printk("swsusp: critical section/: done (%d pages copied)\n", nr_pages); | |
25761b6e RW |
901 | return 0; |
902 | } | |
f577eb30 RW |
903 | |
904 | static void init_header(struct swsusp_info *info) | |
905 | { | |
906 | memset(info, 0, sizeof(struct swsusp_info)); | |
907 | info->version_code = LINUX_VERSION_CODE; | |
908 | info->num_physpages = num_physpages; | |
96b644bd | 909 | memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); |
f577eb30 RW |
910 | info->cpus = num_online_cpus(); |
911 | info->image_pages = nr_copy_pages; | |
912 | info->pages = nr_copy_pages + nr_meta_pages + 1; | |
6e1819d6 RW |
913 | info->size = info->pages; |
914 | info->size <<= PAGE_SHIFT; | |
f577eb30 RW |
915 | } |
916 | ||
917 | /** | |
940864dd RW | 918 | * pack_pfns - pfns corresponding to the set bits found in the bitmap @bm |
919 | * are stored in the array @buf[] (1 page at a time) |
f577eb30 RW | 920 | */ |
921 | ||
b788db79 | 922 | static inline void |
940864dd | 923 | pack_pfns(unsigned long *buf, struct memory_bitmap *bm) |
f577eb30 RW |
924 | { |
925 | int j; | |
926 | ||
b788db79 | 927 | for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { |
940864dd RW |
928 | buf[j] = memory_bm_next_pfn(bm); |
929 | if (unlikely(buf[j] == BM_END_OF_MAP)) | |
b788db79 | 930 | break; |
f577eb30 | 931 | } |
f577eb30 RW |
932 | } |
933 | ||
934 | /** | |
935 | * snapshot_read_next - used for reading the system memory snapshot. | |
936 | * | |
937 | * On the first call to it @handle should point to a zeroed | |
938 | * snapshot_handle structure. The structure gets updated and a pointer | |
939 | * to it should be passed to this function every next time. | |
940 | * | |
941 | * The @count parameter should contain the number of bytes the caller | |
942 | * wants to read from the snapshot. It must not be zero. | |
943 | * | |
944 | * On success the function returns a positive number. Then, the caller | |
945 | * is allowed to read up to the returned number of bytes from the memory | |
946 | * location computed by the data_of() macro. The number returned | |
947 | * may be smaller than @count, but this only happens if the read would | |
948 | * cross a page boundary otherwise. | |
949 | * | |
950 | * The function returns 0 to indicate the end of data stream condition, | |
951 | * and a negative number is returned on error. In such cases the | |
952 | * structure pointed to by @handle is not updated and should not be used | |
953 | * any more. | |
954 | */ | |
955 | ||
956 | int snapshot_read_next(struct snapshot_handle *handle, size_t count) | |
957 | { | |
fb13a28b | 958 | if (handle->cur > nr_meta_pages + nr_copy_pages) |
f577eb30 | 959 | return 0; |
b788db79 | 960 | |
f577eb30 RW |
961 | if (!buffer) { |
962 | /* This makes the buffer be freed by swsusp_free() */ | |
0bcd888d | 963 | buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); |
f577eb30 RW |
964 | if (!buffer) |
965 | return -ENOMEM; | |
966 | } | |
967 | if (!handle->offset) { | |
968 | init_header((struct swsusp_info *)buffer); | |
969 | handle->buffer = buffer; | |
b788db79 RW |
970 | memory_bm_position_reset(&orig_bm); |
971 | memory_bm_position_reset(©_bm); | |
f577eb30 | 972 | } |
fb13a28b RW |
973 | if (handle->prev < handle->cur) { |
974 | if (handle->cur <= nr_meta_pages) { | |
b788db79 | 975 | memset(buffer, 0, PAGE_SIZE); |
940864dd | 976 | pack_pfns(buffer, &orig_bm); |
f577eb30 | 977 | } else { |
b788db79 RW |
978 | unsigned long pfn = memory_bm_next_pfn(©_bm); |
979 | ||
980 | handle->buffer = page_address(pfn_to_page(pfn)); | |
f577eb30 | 981 | } |
fb13a28b | 982 | handle->prev = handle->cur; |
f577eb30 | 983 | } |
fb13a28b RW |
984 | handle->buf_offset = handle->cur_offset; |
985 | if (handle->cur_offset + count >= PAGE_SIZE) { | |
986 | count = PAGE_SIZE - handle->cur_offset; | |
987 | handle->cur_offset = 0; | |
988 | handle->cur++; | |
f577eb30 | 989 | } else { |
fb13a28b | 990 | handle->cur_offset += count; |
f577eb30 RW |
991 | } |
992 | handle->offset += count; | |
993 | return count; | |
994 | } | |
995 | ||
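The caller of snapshot_read_next() is not in this file (the swap-writing code uses it), but the contract spelled out in the comment above reduces to the loop below. This is a sketch only: write_page() is a hypothetical sink, and data_of() is the accessor mentioned in the comment above (defined in power.h).

```c
/* Hypothetical consumer of snapshot_read_next(): keep asking for up
 * to a page at a time until 0 (end of image) or an error comes back. */
static int stream_image_out(struct snapshot_handle *handle,
			    int (*write_page)(void *buf, size_t len))
{
	int ret;

	for (;;) {
		ret = snapshot_read_next(handle, PAGE_SIZE);
		if (ret <= 0)
			return ret;	/* 0: whole image read, <0: error */

		ret = write_page(data_of(*handle), ret);
		if (ret)
			return ret;
	}
}
```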
996 | /** | |
997 | * mark_unsafe_pages - mark the pages that cannot be used for storing | |
998 | * the image during resume, because they conflict with the pages that | |
999 | * had been used before suspend | |
1000 | */ | |
1001 | ||
940864dd | 1002 | static int mark_unsafe_pages(struct memory_bitmap *bm) |
f577eb30 RW |
1003 | { |
1004 | struct zone *zone; | |
ae83c5ee | 1005 | unsigned long pfn, max_zone_pfn; |
f577eb30 RW |
1006 | |
1007 | /* Clear page flags */ | |
1008 | for_each_zone (zone) { | |
ae83c5ee RW |
1009 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; |
1010 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | |
1011 | if (pfn_valid(pfn)) | |
1012 | ClearPageNosaveFree(pfn_to_page(pfn)); | |
f577eb30 RW |
1013 | } |
1014 | ||
940864dd RW | 1015 | /* Mark pages that correspond to the "original" pfns as "unsafe" */ |
1016 | memory_bm_position_reset(bm); | |
1017 | do { | |
1018 | pfn = memory_bm_next_pfn(bm); | |
1019 | if (likely(pfn != BM_END_OF_MAP)) { | |
1020 | if (likely(pfn_valid(pfn))) | |
1021 | SetPageNosaveFree(pfn_to_page(pfn)); | |
1022 | else | |
1023 | return -EFAULT; | |
1024 | } | |
1025 | } while (pfn != BM_END_OF_MAP); | |
f577eb30 | 1026 | |
940864dd | 1027 | allocated_unsafe_pages = 0; |
968808b8 | 1028 | |
f577eb30 RW |
1029 | return 0; |
1030 | } | |
1031 | ||
940864dd RW |
1032 | static void |
1033 | duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src) | |
f577eb30 | 1034 | { |
940864dd RW |
1035 | unsigned long pfn; |
1036 | ||
1037 | memory_bm_position_reset(src); | |
1038 | pfn = memory_bm_next_pfn(src); | |
1039 | while (pfn != BM_END_OF_MAP) { | |
1040 | memory_bm_set_bit(dst, pfn); | |
1041 | pfn = memory_bm_next_pfn(src); | |
f577eb30 RW |
1042 | } |
1043 | } | |
1044 | ||
940864dd | 1045 | static inline int check_header(struct swsusp_info *info) |
f577eb30 RW |
1046 | { |
1047 | char *reason = NULL; | |
1048 | ||
1049 | if (info->version_code != LINUX_VERSION_CODE) | |
1050 | reason = "kernel version"; | |
1051 | if (info->num_physpages != num_physpages) | |
1052 | reason = "memory size"; | |
96b644bd | 1053 | if (strcmp(info->uts.sysname,init_utsname()->sysname)) |
f577eb30 | 1054 | reason = "system type"; |
96b644bd | 1055 | if (strcmp(info->uts.release,init_utsname()->release)) |
f577eb30 | 1056 | reason = "kernel release"; |
96b644bd | 1057 | if (strcmp(info->uts.version,init_utsname()->version)) |
f577eb30 | 1058 | reason = "version"; |
96b644bd | 1059 | if (strcmp(info->uts.machine,init_utsname()->machine)) |
f577eb30 RW |
1060 | reason = "machine"; |
1061 | if (reason) { | |
1062 | printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); | |
1063 | return -EPERM; | |
1064 | } | |
1065 | return 0; | |
1066 | } | |
1067 | ||
1068 | /** | |
1069 | * load_header - check the image header and copy data from it |
1070 | */ | |
1071 | ||
940864dd RW |
1072 | static int |
1073 | load_header(struct swsusp_info *info) | |
f577eb30 RW |
1074 | { |
1075 | int error; | |
f577eb30 | 1076 | |
940864dd | 1077 | restore_pblist = NULL; |
f577eb30 RW |
1078 | error = check_header(info); |
1079 | if (!error) { | |
f577eb30 RW |
1080 | nr_copy_pages = info->image_pages; |
1081 | nr_meta_pages = info->pages - info->image_pages - 1; | |
1082 | } | |
1083 | return error; | |
1084 | } | |
1085 | ||
1086 | /** | |
940864dd RW | 1087 | * unpack_orig_pfns - for each element of @buf[] (1 page at a time) set |
1088 | * the corresponding bit in the memory bitmap @bm |
f577eb30 RW | 1089 | */ |
1090 | ||
940864dd RW |
1091 | static inline void |
1092 | unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm) | |
f577eb30 RW |
1093 | { |
1094 | int j; | |
1095 | ||
940864dd RW |
1096 | for (j = 0; j < PAGE_SIZE / sizeof(long); j++) { |
1097 | if (unlikely(buf[j] == BM_END_OF_MAP)) | |
1098 | break; | |
1099 | ||
1100 | memory_bm_set_bit(bm, buf[j]); | |
f577eb30 | 1101 | } |
f577eb30 RW |
1102 | } |
1103 | ||
1104 | /** | |
940864dd RW | 1105 | * prepare_image - use the memory bitmap @bm to mark the pages that will |
1106 | * be overwritten in the process of restoring the system memory state | |
1107 | * from the suspend image ("unsafe" pages) and allocate memory for the | |
1108 | * image. | |
968808b8 | 1109 | * |
940864dd RW | 1110 | * The idea is to allocate a new memory bitmap first and then allocate |
1111 | * as many pages as needed for the image data, but not to assign these | |
1112 | * pages to specific tasks initially. Instead, we just mark them as | |
1113 | * allocated and create a list of "safe" pages that will be used later. | |
f577eb30 RW | 1114 | */ |
1115 | ||
940864dd RW |
1116 | #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe)) |
1117 | ||
1118 | static struct linked_page *safe_pages_list; | |
968808b8 | 1119 | |
940864dd RW |
1120 | static int |
1121 | prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm) | |
f577eb30 | 1122 | { |
940864dd RW |
1123 | unsigned int nr_pages; |
1124 | struct linked_page *sp_list, *lp; | |
1125 | int error; | |
f577eb30 | 1126 | |
940864dd RW |
1127 | error = mark_unsafe_pages(bm); |
1128 | if (error) | |
1129 | goto Free; | |
1130 | ||
1131 | error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE); | |
1132 | if (error) | |
1133 | goto Free; | |
1134 | ||
1135 | duplicate_memory_bitmap(new_bm, bm); | |
1136 | memory_bm_free(bm, PG_UNSAFE_KEEP); | |
1137 | /* Reserve some safe pages for potential later use. | |
1138 | * | |
1139 | * NOTE: This way we make sure there will be enough safe pages for the | |
1140 | * chain_alloc() in get_buffer(). It is a bit wasteful, but | |
1141 | * nr_copy_pages cannot be greater than 50% of the memory anyway. | |
1142 | */ | |
1143 | sp_list = NULL; | |
1144 | /* nr_copy_pages cannot be less than allocated_unsafe_pages */ |
1145 | nr_pages = nr_copy_pages - allocated_unsafe_pages; | |
1146 | nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE); | |
1147 | while (nr_pages > 0) { | |
1148 | lp = alloc_image_page(GFP_ATOMIC, PG_SAFE); | |
1149 | if (!lp) { | |
f577eb30 | 1150 | error = -ENOMEM; |
940864dd RW |
1151 | goto Free; |
1152 | } | |
1153 | lp->next = sp_list; | |
1154 | sp_list = lp; | |
1155 | nr_pages--; | |
f577eb30 | 1156 | } |
940864dd RW | 1157 | /* Preallocate memory for the image */ |
1158 | safe_pages_list = NULL; | |
1159 | nr_pages = nr_copy_pages - allocated_unsafe_pages; | |
1160 | while (nr_pages > 0) { | |
1161 | lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC); | |
1162 | if (!lp) { | |
1163 | error = -ENOMEM; | |
1164 | goto Free; | |
1165 | } | |
1166 | if (!PageNosaveFree(virt_to_page(lp))) { | |
1167 | /* The page is "safe", add it to the list */ | |
1168 | lp->next = safe_pages_list; | |
1169 | safe_pages_list = lp; | |
968808b8 | 1170 | } |
940864dd RW | 1171 | /* Mark the page as allocated */ |
1172 | SetPageNosave(virt_to_page(lp)); | |
1173 | SetPageNosaveFree(virt_to_page(lp)); | |
1174 | nr_pages--; | |
968808b8 | 1175 | } |
940864dd RW | 1176 | /* Free the reserved safe pages so that chain_alloc() can use them */ |
1177 | while (sp_list) { | |
1178 | lp = sp_list->next; | |
1179 | free_image_page(sp_list, PG_UNSAFE_CLEAR); | |
1180 | sp_list = lp; | |
f577eb30 | 1181 | } |
940864dd RW |
1182 | return 0; |
1183 | ||
1184 | Free: | |
1185 | swsusp_free(); | |
f577eb30 RW |
1186 | return error; |
1187 | } | |
1188 | ||
940864dd RW | 1189 | /** |
1190 | * get_buffer - compute the address that snapshot_write_next() should | |
1191 | * set for its caller to write to. | |
1192 | */ | |
1193 | ||
1194 | static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca) | |
968808b8 | 1195 | { |
940864dd RW |
1196 | struct pbe *pbe; |
1197 | struct page *page = pfn_to_page(memory_bm_next_pfn(bm)); | |
968808b8 | 1198 | |
940864dd RW |
1199 | if (PageNosave(page) && PageNosaveFree(page)) |
1200 | /* We have allocated the "original" page frame and we can | |
1201 | * use it directly to store the loaded page. | |
968808b8 | 1202 | */ |
940864dd RW |
1203 | return page_address(page); |
1204 | ||
1205 | /* The "original" page frame has not been allocated and we have to | |
1206 | * use a "safe" page frame to store the loaded page. | |
968808b8 | 1207 | */ |
940864dd RW |
1208 | pbe = chain_alloc(ca, sizeof(struct pbe)); |
1209 | if (!pbe) { | |
1210 | swsusp_free(); | |
1211 | return NULL; | |
1212 | } | |
1213 | pbe->orig_address = (unsigned long)page_address(page); | |
1214 | pbe->address = (unsigned long)safe_pages_list; | |
1215 | safe_pages_list = safe_pages_list->next; | |
1216 | pbe->next = restore_pblist; | |
1217 | restore_pblist = pbe; | |
968808b8 RW |
1218 | return (void *)pbe->address; |
1219 | } | |
1220 | ||
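The PBEs chained onto restore_pblist here are consumed after the image has been fully loaded, when the pages parked in "safe" frames are copied back to the frames they originally occupied (in the real kernel that final copy is done by architecture-specific resume code). Below is a sketch of that walk, assuming the usual three-field struct pbe (address, orig_address, next) from <linux/suspend.h>.

```c
/* Illustration only: move every loaded page that landed in a "safe"
 * frame back to the frame it occupied before suspend. */
static void relocate_restore_pages(void)
{
	struct pbe *p;

	for (p = restore_pblist; p; p = p->next)
		copy_page((void *)p->orig_address, (void *)p->address);
}
```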
f577eb30 RW | 1221 | /** |
1222 | * snapshot_write_next - used for writing the system memory snapshot. | |
1223 | * | |
1224 | * On the first call to it @handle should point to a zeroed | |
1225 | * snapshot_handle structure. The structure gets updated and a pointer | |
1226 | * to it should be passed to this function every next time. | |
1227 | * | |
1228 | * The @count parameter should contain the number of bytes the caller | |
1229 | * wants to write to the image. It must not be zero. | |
1230 | * | |
1231 | * On success the function returns a positive number. Then, the caller | |
1232 | * is allowed to write up to the returned number of bytes to the memory | |
1233 | * location computed by the data_of() macro. The number returned | |
1234 | * may be smaller than @count, but this only happens if the write would | |
1235 | * cross a page boundary otherwise. | |
1236 | * | |
1237 | * The function returns 0 to indicate the "end of file" condition, | |
1238 | * and a negative number is returned on error. In such cases the | |
1239 | * structure pointed to by @handle is not updated and should not be used | |
1240 | * any more. | |
1241 | */ | |
1242 | ||
1243 | int snapshot_write_next(struct snapshot_handle *handle, size_t count) | |
1244 | { | |
940864dd | 1245 | static struct chain_allocator ca; |
f577eb30 RW |
1246 | int error = 0; |
1247 | ||
940864dd | 1248 | /* Check if we have already loaded the entire image */ |
fb13a28b | 1249 | if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) |
f577eb30 | 1250 | return 0; |
940864dd | 1251 | |
f577eb30 RW |
1252 | if (!buffer) { |
1253 | /* This makes the buffer be freed by swsusp_free() */ | |
0bcd888d | 1254 | buffer = alloc_image_page(GFP_ATOMIC, PG_ANY); |
f577eb30 RW |
1255 | if (!buffer) |
1256 | return -ENOMEM; | |
1257 | } | |
1258 | if (!handle->offset) | |
1259 | handle->buffer = buffer; | |
546e0d27 | 1260 | handle->sync_read = 1; |
fb13a28b | 1261 | if (handle->prev < handle->cur) { |
940864dd RW |
1262 | if (handle->prev == 0) { |
1263 | error = load_header(buffer); | |
1264 | if (error) | |
1265 | return error; | |
1266 | ||
1267 | error = memory_bm_create(©_bm, GFP_ATOMIC, PG_ANY); | |
f577eb30 RW |
1268 | if (error) |
1269 | return error; | |
940864dd | 1270 | |
f577eb30 | 1271 | } else if (handle->prev <= nr_meta_pages) { |
940864dd RW |
1272 | unpack_orig_pfns(buffer, ©_bm); |
1273 | if (handle->prev == nr_meta_pages) { | |
1274 | error = prepare_image(&orig_bm, ©_bm); | |
f577eb30 RW |
1275 | if (error) |
1276 | return error; | |
940864dd RW |
1277 | |
1278 | chain_init(&ca, GFP_ATOMIC, PG_SAFE); | |
1279 | memory_bm_position_reset(&orig_bm); | |
1280 | restore_pblist = NULL; | |
1281 | handle->buffer = get_buffer(&orig_bm, &ca); | |
546e0d27 | 1282 | handle->sync_read = 0; |
940864dd RW |
1283 | if (!handle->buffer) |
1284 | return -ENOMEM; | |
f577eb30 RW |
1285 | } |
1286 | } else { | |
940864dd | 1287 | handle->buffer = get_buffer(&orig_bm, &ca); |
546e0d27 | 1288 | handle->sync_read = 0; |
f577eb30 | 1289 | } |
fb13a28b | 1290 | handle->prev = handle->cur; |
f577eb30 | 1291 | } |
fb13a28b RW |
1292 | handle->buf_offset = handle->cur_offset; |
1293 | if (handle->cur_offset + count >= PAGE_SIZE) { | |
1294 | count = PAGE_SIZE - handle->cur_offset; | |
1295 | handle->cur_offset = 0; | |
1296 | handle->cur++; | |
f577eb30 | 1297 | } else { |
fb13a28b | 1298 | handle->cur_offset += count; |
f577eb30 RW |
1299 | } |
1300 | handle->offset += count; | |
1301 | return count; | |
1302 | } | |
1303 | ||
1304 | int snapshot_image_loaded(struct snapshot_handle *handle) | |
1305 | { | |
940864dd RW |
1306 | return !(!nr_copy_pages || |
1307 | handle->cur <= nr_meta_pages + nr_copy_pages); | |
1308 | } | |
1309 | ||
1310 | void snapshot_free_unused_memory(struct snapshot_handle *handle) | |
1311 | { | |
1312 | /* Free only if we have loaded the image entirely */ | |
1313 | if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) | |
1314 | memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR); | |
f577eb30 | 1315 | } |
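For symmetry with the read side shown earlier, snapshot_write_next() plus the two helpers above combine into a loading loop of roughly the following shape; again a sketch with a hypothetical read_page() source rather than the real swap reader.

```c
/* Hypothetical feeder for snapshot_write_next(): obtain a buffer,
 * fill it from storage, repeat; then verify and trim. */
static int stream_image_in(struct snapshot_handle *handle,
			   int (*read_page)(void *buf, size_t len))
{
	int ret;

	for (;;) {
		ret = snapshot_write_next(handle, PAGE_SIZE);
		if (ret <= 0)
			break;
		ret = read_page(data_of(*handle), ret);
		if (ret)
			return ret;
	}
	if (ret < 0)
		return ret;
	if (!snapshot_image_loaded(handle))
		return -ENODATA;	/* image incomplete */
	snapshot_free_unused_memory(handle);
	return 0;
}
```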