/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 *
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"

static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);

/*
 * Number of bytes to reserve for memory allocations made by device drivers
 * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
 * cause image creation to fail (tunable via /sys/power/reserved_size).
 */
unsigned long reserved_size;

void __init hibernate_reserved_size_init(void)
{
	reserved_size = SPARE_PAGES * PAGE_SIZE;
}

/*
 * Preferred image size in bytes (tunable via /sys/power/image_size).
 * When it is set to N, swsusp will do its best to ensure the image
 * size will not exceed N bytes, but if that is impossible, it will
 * try to create the smallest image possible.
 */
unsigned long image_size;

void __init hibernate_image_size_init(void)
{
	image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
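
/*
 * Example (illustrative): with 4 KiB pages on a machine with 1 GiB of
 * RAM (totalram_pages ~ 262144), the default works out to
 * ((262144 * 2) / 5) * 4096, i.e. 104857 pages or roughly 410 MiB,
 * about two fifths of total RAM.
 */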

/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;

/**
 * @safe_needed - on resume, for storing the PBE list and the image,
 * we can only use memory pages that do not conflict with the pages
 * used before suspend.  The unsafe pages have PageNosaveFree set
 * and we count them using allocated_unsafe_pages.
 *
 * Each allocated image page is marked as PageNosave and PageNosaveFree
 * so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0

static unsigned int allocated_unsafe_pages;

static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && swsusp_page_is_free(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			swsusp_set_page_forbidden(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		swsusp_set_page_forbidden(virt_to_page(res));
		swsusp_set_page_free(virt_to_page(res));
	}
	return res;
}

unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}

static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		swsusp_set_page_forbidden(page);
		swsusp_set_page_free(page);
	}
	return page;
}

/**
 * free_image_page - free page represented by @addr, allocated with
 * get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	swsusp_unset_page_forbidden(page);
	if (clear_nosave_free)
		swsusp_unset_page_free(page);

	__free_page(page);
}

/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __packed;
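
/*
 * Example (illustrative): with 4 KiB pages and 8-byte pointers,
 * LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes of usable payload
 * per chain page.
 */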

static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}

/**
 * struct chain_allocator is used for allocating small objects out of
 * a linked list of pages called 'the chain'.
 *
 * The chain grows each time there is no room for a new object in the
 * current page.  The allocated objects cannot be freed individually.
 * It is only possible to free them all at once, by freeing the entire
 * chain.
 *
 * NOTE: The chain allocator may be inefficient if the allocated objects
 * are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};

static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}

static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
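
/*
 * Usage sketch (cf. alloc_rtree_node() below): small objects are carved
 * out of chain pages one after another and can only be released all at
 * once, via free_list_of_pages(ca.chain, ...):
 *
 *	struct chain_allocator ca;
 *	struct rtree_node *node;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_SAFE);
 *	node = chain_alloc(&ca, sizeof(struct rtree_node));
 */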

/**
 * Data types related to memory bitmaps.
 *
 * A memory bitmap is a structure consisting of many linked lists of
 * objects.  For each populated memory zone there is one object of type
 * struct mem_zone_bm_rtree, which holds the blocks of the bitmap in
 * which the actual bit information for that zone is stored.
 *
 * struct memory_bitmap contains a pointer to the main list of zone
 * bitmap objects, a struct bm_position used for browsing the bitmap,
 * and a pointer to the list of pages used for allocating all of the
 * zone bitmap objects and bitmap block objects.
 *
 * NOTE: It has to be possible to lay out the bitmap in memory
 * using only allocations of order 0.  Additionally, the bitmap is
 * designed to work with an arbitrary number of zones (this is over the
 * top for now, but let's avoid making unnecessary assumptions ;-).
 *
 * The memory bitmap is organized as a radix tree to guarantee fast random
 * access to the bits.  There is one radix tree for each zone (as returned
 * from create_mem_extents).
 *
 * One radix tree is represented by one struct mem_zone_bm_rtree.  There are
 * two linked lists for the nodes of the tree, one for the inner nodes and
 * one for the leaf nodes.  The linked leaf nodes are used for fast linear
 * access of the memory bitmap.
 *
 * The struct rtree_node represents one node of the radix tree.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
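
/*
 * Example (illustrative): with 4 KiB pages, one bitmap block covers
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768 page frames, BM_BLOCK_SHIFT is
 * 12 + 3 = 15, and BM_BLOCK_MASK is 0x7fff.
 */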

/*
 * struct rtree_node is a wrapper struct to link the nodes
 * of the rtree together for easy linear iteration over
 * bits and easy freeing
 */
struct rtree_node {
	struct list_head list;
	unsigned long *data;
};

/*
 * struct mem_zone_bm_rtree represents a bitmap used for one
 * populated memory zone.
 */
struct mem_zone_bm_rtree {
	struct list_head list;		/* Link Zones together */
	struct list_head nodes;		/* Radix Tree inner nodes */
	struct list_head leaves;	/* Radix Tree leaves */
	unsigned long start_pfn;	/* Zone start page frame */
	unsigned long end_pfn;		/* Zone end page frame + 1 */
	struct rtree_node *rtree;	/* Radix Tree Root */
	int levels;			/* Number of Radix Tree Levels */
	unsigned int blocks;		/* Number of Bitmap Blocks */
};

/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct mem_zone_bm_rtree *zone;
	struct rtree_node *node;
	unsigned long node_pfn;
	int node_bit;
};

struct memory_bitmap {
	struct list_head zones;
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};

/* Functions that operate on memory bitmaps */

#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
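
/*
 * Example (illustrative): on a 64-bit machine with 4 KiB pages, each
 * radix tree node holds BM_ENTRIES_PER_LEVEL = 4096 / 8 = 512 entries,
 * so BM_RTREE_LEVEL_SHIFT is 9 and BM_RTREE_LEVEL_MASK is 0x1ff.
 */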

/*
 * alloc_rtree_node - Allocate a new node and add it to the radix tree.
 *
 * This function is used to allocate inner nodes as well as the
 * leaf nodes of the radix tree.  It also adds the node to the
 * corresponding linked list passed in by the *list parameter.
 */
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
					   struct chain_allocator *ca,
					   struct list_head *list)
{
	struct rtree_node *node;

	node = chain_alloc(ca, sizeof(struct rtree_node));
	if (!node)
		return NULL;

	node->data = get_image_page(gfp_mask, safe_needed);
	if (!node->data)
		return NULL;

	list_add_tail(&node->list, list);

	return node;
}

/*
 * add_rtree_block - Add a new leaf node to the radix tree
 *
 * The leaf nodes need to be allocated in order to keep the leaves
 * linked list in order.  This is guaranteed by the zone->blocks
 * counter.
 */
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
			   int safe_needed, struct chain_allocator *ca)
{
	struct rtree_node *node, *block, **dst;
	unsigned int levels_needed, block_nr;
	int i;

	block_nr = zone->blocks;
	levels_needed = 0;

	/* How many levels do we need for this block nr? */
	while (block_nr) {
		levels_needed += 1;
		block_nr >>= BM_RTREE_LEVEL_SHIFT;
	}

	/* Make sure the rtree has enough levels */
	for (i = zone->levels; i < levels_needed; i++) {
		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
					&zone->nodes);
		if (!node)
			return -ENOMEM;

		node->data[0] = (unsigned long)zone->rtree;
		zone->rtree = node;
		zone->levels += 1;
	}

	/* Allocate new block */
	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
	if (!block)
		return -ENOMEM;

	/* Now walk the rtree to insert the block */
	node = zone->rtree;
	dst = &zone->rtree;
	block_nr = zone->blocks;
	for (i = zone->levels; i > 0; i--) {
		int index;

		if (!node) {
			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
						&zone->nodes);
			if (!node)
				return -ENOMEM;
			*dst = node;
		}

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		dst = (struct rtree_node **)&((*dst)->data[index]);
		node = *dst;
	}

	zone->blocks += 1;
	*dst = block;

	return 0;
}
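
/*
 * Example (illustrative, 64-bit, 4 KiB pages): inserting block number
 * 1000 into a two-level tree walks index (1000 >> 9) & 0x1ff = 1 at the
 * top level and 1000 & 0x1ff = 488 at the bottom level, so the new leaf
 * ends up at zone->rtree->data[1]->data[488].
 */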

static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free);

/*
 * create_zone_bm_rtree - create a radix tree for one zone
 *
 * Allocates the mem_zone_bm_rtree structure and initializes it.
 * This function also allocates and builds the radix tree for the
 * zone.
 */
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
		     struct chain_allocator *ca,
		     unsigned long start, unsigned long end)
{
	struct mem_zone_bm_rtree *zone;
	unsigned int i, nr_blocks;
	unsigned long pages;

	pages = end - start;
	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
	if (!zone)
		return NULL;

	INIT_LIST_HEAD(&zone->nodes);
	INIT_LIST_HEAD(&zone->leaves);
	zone->start_pfn = start;
	zone->end_pfn = end;
	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);

	for (i = 0; i < nr_blocks; i++) {
		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
			return NULL;
		}
	}

	return zone;
}

/*
 * free_zone_bm_rtree - Free the memory of the radix tree
 *
 * Free all node pages of the radix tree.  The mem_zone_bm_rtree
 * structure itself is not freed here nor are the rtree_node
 * structs.
 */
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
			       int clear_nosave_free)
{
	struct rtree_node *node;

	list_for_each_entry(node, &zone->nodes, list)
		free_image_page(node->data, clear_nosave_free);

	list_for_each_entry(node, &zone->leaves, list)
		free_image_page(node->data, clear_nosave_free);
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
				  list);
	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
				  struct rtree_node, list);
	bm->cur.node_pfn = 0;
	bm->cur.node_bit = 0;
}

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);

struct mem_extent {
	struct list_head hook;
	unsigned long start;
	unsigned long end;
};

/**
 * free_mem_extents - free a list of memory extents
 * @list - list of extents to empty
 */
static void free_mem_extents(struct list_head *list)
{
	struct mem_extent *ext, *aux;

	list_for_each_entry_safe(ext, aux, list, hook) {
		list_del(&ext->hook);
		kfree(ext);
	}
}

/**
 * create_mem_extents - create a list of memory extents representing
 * contiguous ranges of PFNs
 * @list - list to put the extents into
 * @gfp_mask - mask to use for memory allocations
 */
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
	struct zone *zone;

	INIT_LIST_HEAD(list);

	for_each_populated_zone(zone) {
		unsigned long zone_start, zone_end;
		struct mem_extent *ext, *cur, *aux;

		zone_start = zone->zone_start_pfn;
		zone_end = zone_end_pfn(zone);

		list_for_each_entry(ext, list, hook)
			if (zone_start <= ext->end)
				break;

		if (&ext->hook == list || zone_end < ext->start) {
			/* New extent is necessary */
			struct mem_extent *new_ext;

			new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
			if (!new_ext) {
				free_mem_extents(list);
				return -ENOMEM;
			}
			new_ext->start = zone_start;
			new_ext->end = zone_end;
			list_add_tail(&new_ext->hook, &ext->hook);
			continue;
		}

		/* Merge this zone's range of PFNs with the existing one */
		if (zone_start < ext->start)
			ext->start = zone_start;
		if (zone_end > ext->end)
			ext->end = zone_end;

		/* More merging may be possible */
		cur = ext;
		list_for_each_entry_safe_continue(cur, aux, list, hook) {
			if (zone_end < cur->start)
				break;
			if (zone_end < cur->end)
				ext->end = cur->end;
			list_del(&cur->hook);
			kfree(cur);
		}
	}

	return 0;
}
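
/*
 * Example (illustrative): two overlapping zones spanning PFNs [0, 1000)
 * and [500, 2000) end up merged into a single extent [0, 2000), while a
 * third zone at [4000, 5000) gets an extent of its own.
 */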

/**
 * memory_bm_create - allocate memory for a memory bitmap
 */
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct list_head mem_extents;
	struct mem_extent *ext;
	int error;

	chain_init(&ca, gfp_mask, safe_needed);
	INIT_LIST_HEAD(&bm->zones);

	error = create_mem_extents(&mem_extents, gfp_mask);
	if (error)
		return error;

	list_for_each_entry(ext, &mem_extents, hook) {
		struct mem_zone_bm_rtree *zone;

		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
					    ext->start, ext->end);
		if (!zone) {
			error = -ENOMEM;
			goto Error;
		}
		list_add_tail(&zone->list, &bm->zones);
	}

	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
 Exit:
	free_mem_extents(&mem_extents);
	return error;

 Error:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	goto Exit;
}

/**
 * memory_bm_free - free memory occupied by the memory bitmap @bm
 */
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct mem_zone_bm_rtree *zone;

	list_for_each_entry(zone, &bm->zones, list)
		free_zone_bm_rtree(zone, clear_nosave_free);

	free_list_of_pages(bm->p_list, clear_nosave_free);

	INIT_LIST_HEAD(&bm->zones);
}

/**
 * memory_bm_find_bit - Find the bit for pfn in the memory
 * bitmap
 *
 * Find the bit in the bitmap @bm that corresponds to the given pfn.
 * The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
 * It walks the radix tree to find the page which contains the bit for
 * pfn and returns the bit's location via *addr (the page of bits) and
 * *bit_nr (the offset within that page).
 */
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
			      void **addr, unsigned int *bit_nr)
{
	struct mem_zone_bm_rtree *curr, *zone;
	struct rtree_node *node;
	int i, block_nr;

	zone = bm->cur.zone;

	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
		goto zone_found;

	zone = NULL;

	/* Find the right zone */
	list_for_each_entry(curr, &bm->zones, list) {
		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
			zone = curr;
			break;
		}
	}

	if (!zone)
		return -EFAULT;

zone_found:
	/*
	 * We have a zone.  Now walk the radix tree to find the leaf
	 * node for our pfn.
	 */

	node = bm->cur.node;
	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
		goto node_found;

	node = zone->rtree;
	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;

	for (i = zone->levels; i > 0; i--) {
		int index;

		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
		index &= BM_RTREE_LEVEL_MASK;
		BUG_ON(node->data[index] == 0);
		node = (struct rtree_node *)node->data[index];
	}

node_found:
	/* Update last position */
	bm->cur.zone = zone;
	bm->cur.node = node;
	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;

	/* Set return values */
	*addr = node->data;
	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;

	return 0;
}
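
/*
 * Example (illustrative, 64-bit, 4 KiB pages): for a zone starting at
 * PFN 0x1000, looking up pfn 0x9000 computes block_nr = 0x8000 >> 15
 * = 1, walks the tree to leaf block 1, and returns bit_nr = 0x8000 &
 * 0x7fff = 0 within that block's page of bits.
 */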

static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	set_bit(bit, addr);
}

static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	if (!error)
		set_bit(bit, addr);

	return error;
}

static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	clear_bit(bit, addr);
}

static void memory_bm_clear_current(struct memory_bitmap *bm)
{
	int bit;

	bit = max(bm->cur.node_bit - 1, 0);
	clear_bit(bit, bm->cur.node->data);
}

static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;
	int error;

	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
	BUG_ON(error);
	return test_bit(bit, addr);
}

static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
	void *addr;
	unsigned int bit;

	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}

/*
 * rtree_next_node - Jumps to the next leaf node
 *
 * Sets the position to the beginning of the next node in the
 * memory bitmap.  This is either the next node in the current
 * zone's radix tree or the first node in the radix tree of the
 * next zone.
 *
 * Returns true if there is a next node, false otherwise.
 */
static bool rtree_next_node(struct memory_bitmap *bm)
{
	bm->cur.node = list_entry(bm->cur.node->list.next,
				  struct rtree_node, list);
	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
		bm->cur.node_bit = 0;
		touch_softlockup_watchdog();
		return true;
	}

	/* No more nodes, goto next zone */
	bm->cur.zone = list_entry(bm->cur.zone->list.next,
				  struct mem_zone_bm_rtree, list);
	if (&bm->cur.zone->list != &bm->zones) {
		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
					  struct rtree_node, list);
		bm->cur.node_pfn = 0;
		bm->cur.node_bit = 0;
		return true;
	}

	/* No more zones */
	return false;
}

/**
 * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
 *
 * Starting from the last returned position this function searches
 * for the next set bit in the memory bitmap and returns its
 * number.  If no more bits are set, BM_END_OF_MAP is returned.
 *
 * It is required to run memory_bm_position_reset() before the
 * first call to this function.
 */
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	unsigned long bits, pfn, pages;
	int bit;

	do {
		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
		bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
		bit = find_next_bit(bm->cur.node->data, bits,
				    bm->cur.node_bit);
		if (bit < bits) {
			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
			bm->cur.node_bit = bit + 1;
			return pfn;
		}
	} while (rtree_next_node(bm));

	return BM_END_OF_MAP;
}
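
/*
 * Typical iteration pattern (sketch; cf. duplicate_memory_bitmap()
 * below, handle_pfn() is a hypothetical consumer):
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	pfn = memory_bm_next_pfn(bm);
 *	while (pfn != BM_END_OF_MAP) {
 *		handle_pfn(pfn);
 *		pfn = memory_bm_next_pfn(bm);
 *	}
 */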

/**
 * This structure represents a range of page frames the contents of which
 * should not be saved during the suspend.
 */

struct nosave_region {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static LIST_HEAD(nosave_regions);

/**
 * register_nosave_region - register a range of page frames the contents
 * of which should not be saved during the suspend (to be used in the early
 * initialization code)
 */

void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
			 int use_kmalloc)
{
	struct nosave_region *region;

	if (start_pfn >= end_pfn)
		return;

	if (!list_empty(&nosave_regions)) {
		/* Try to extend the previous region (they should be sorted) */
		region = list_entry(nosave_regions.prev,
				    struct nosave_region, list);
		if (region->end_pfn == start_pfn) {
			region->end_pfn = end_pfn;
			goto Report;
		}
	}
	if (use_kmalloc) {
		/* during init, this shouldn't fail */
		region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
		BUG_ON(!region);
	} else
		/* This allocation cannot fail */
		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
	region->start_pfn = start_pfn;
	region->end_pfn = end_pfn;
	list_add_tail(&region->list, &nosave_regions);
 Report:
	printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
		(unsigned long long) start_pfn << PAGE_SHIFT,
		((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}

/*
 * Set bits in this map correspond to the page frames the contents of which
 * should not be saved during the suspend.
 */
static struct memory_bitmap *forbidden_pages_map;

/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;

/*
 * Each page frame allocated for creating the image is marked by setting the
 * corresponding bits in forbidden_pages_map and free_pages_map simultaneously.
 */

void swsusp_set_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}

static int swsusp_page_is_free(struct page *page)
{
	return free_pages_map ?
		memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}

void swsusp_unset_page_free(struct page *page)
{
	if (free_pages_map)
		memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}

static void swsusp_set_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}

int swsusp_page_is_forbidden(struct page *page)
{
	return forbidden_pages_map ?
		memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}

static void swsusp_unset_page_forbidden(struct page *page)
{
	if (forbidden_pages_map)
		memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}

/**
 * mark_nosave_pages - set, in a given bitmap, the bits corresponding to
 * the page frames the contents of which should not be saved.
 */

static void mark_nosave_pages(struct memory_bitmap *bm)
{
	struct nosave_region *region;

	if (list_empty(&nosave_regions))
		return;

	list_for_each_entry(region, &nosave_regions, list) {
		unsigned long pfn;

		pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
			 (unsigned long long) region->start_pfn << PAGE_SHIFT,
			 ((unsigned long long) region->end_pfn << PAGE_SHIFT)
				- 1);

		for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
			if (pfn_valid(pfn)) {
				/*
				 * It is safe to ignore the result of
				 * mem_bm_set_bit_check() here, since we won't
				 * touch the PFNs for which the error is
				 * returned anyway.
				 */
				mem_bm_set_bit_check(bm, pfn);
			}
	}
}

/**
 * create_basic_memory_bitmaps - create bitmaps needed for marking page
 * frames that should not be saved and free page frames.  The pointers
 * forbidden_pages_map and free_pages_map are only modified if everything
 * goes well, because we don't want the bits to be used before both bitmaps
 * are set up.
 */

int create_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;
	int error = 0;

	if (forbidden_pages_map && free_pages_map)
		return 0;
	else
		BUG_ON(forbidden_pages_map || free_pages_map);

	bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm1)
		return -ENOMEM;

	error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_first_object;

	bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
	if (!bm2)
		goto Free_first_bitmap;

	error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
	if (error)
		goto Free_second_object;

	forbidden_pages_map = bm1;
	free_pages_map = bm2;
	mark_nosave_pages(forbidden_pages_map);

	pr_debug("PM: Basic memory bitmaps created\n");

	return 0;

 Free_second_object:
	kfree(bm2);
 Free_first_bitmap:
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
 Free_first_object:
	kfree(bm1);
	return -ENOMEM;
}

/**
 * free_basic_memory_bitmaps - free memory bitmaps allocated by
 * create_basic_memory_bitmaps().  The auxiliary pointers are necessary
 * so that the bitmaps themselves are not referred to while they are being
 * freed.
 */

void free_basic_memory_bitmaps(void)
{
	struct memory_bitmap *bm1, *bm2;

	if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
		return;

	bm1 = forbidden_pages_map;
	bm2 = free_pages_map;
	forbidden_pages_map = NULL;
	free_pages_map = NULL;
	memory_bm_free(bm1, PG_UNSAFE_CLEAR);
	kfree(bm1);
	memory_bm_free(bm2, PG_UNSAFE_CLEAR);
	kfree(bm2);

	pr_debug("PM: Basic memory bitmaps freed\n");
}

/**
 * snapshot_additional_pages - estimate the number of additional pages
 * that will be needed for setting up the suspend image data structures
 * for a given zone (usually the returned value is greater than the
 * exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int rtree, nodes;

	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
			      LINKED_PAGE_DATA_SIZE);
	while (nodes > 1) {
		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
		rtree += nodes;
	}

	return 2 * rtree;
}
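
/*
 * Example (illustrative, 64-bit, 4 KiB pages): for a zone spanning
 * 1 GiB (262144 page frames), the bitmap needs 262144 / 32768 = 8 leaf
 * blocks, one chain page for the eight ~24-byte rtree_node structs and
 * one inner node, i.e. rtree = 10; the function returns 2 * 10 = 20
 * pages, covering a pair of memory bitmaps.
 */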

#ifdef CONFIG_HIGHMEM
/**
 * count_free_highmem_pages - compute the total number of free highmem
 * pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_populated_zone(zone)
		if (is_highmem(zone))
			cnt += zone_page_state(zone, NR_FREE_PAGES);

	return cnt;
}

/**
 * saveable_highmem_page - Determine whether a highmem page should be
 * included in the suspend image.
 *
 * We should save the page if it isn't Nosave or NosaveFree or Reserved,
 * and it isn't part of a free chunk of pages.
 */
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(!PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
	    PageReserved(page))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_highmem_pages - compute the total number of saveable highmem
 * pages.
 */

static unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(zone, pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
	return NULL;
}
#endif /* CONFIG_HIGHMEM */

/**
 * saveable_page - Determine whether a non-highmem page should be included
 * in the suspend image.
 *
 * We should save the page if it isn't Nosave, and is not in the range
 * of pages statically defined as 'unsaveable', and it isn't part of
 * a free chunk of pages.
 */
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);
	if (page_zone(page) != zone)
		return NULL;

	BUG_ON(PageHighMem(page));

	if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
		return NULL;

	if (PageReserved(page)
	    && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
		return NULL;

	if (page_is_guard(page))
		return NULL;

	return page;
}

/**
 * count_data_pages - compute the total number of saveable non-highmem
 * pages.
 */

static unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(zone, pfn))
				n++;
	}
	return n;
}

/* This is needed because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}


/**
 * safe_copy_page - check if the page we are going to copy is marked as
 * present in the kernel page tables (this always is the case if
 * CONFIG_DEBUG_PAGEALLOC is not set and in that case
 * kernel_page_present() always returns 'true').
 */
static void safe_copy_page(void *dst, struct page *s_page)
{
	if (kernel_page_present(s_page)) {
		do_copy_page(dst, page_address(s_page));
	} else {
		kernel_map_pages(s_page, 1, 1);
		do_copy_page(dst, page_address(s_page));
		kernel_map_pages(s_page, 1, 0);
	}
}


#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
		saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}

static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page);
		dst = kmap_atomic(d_page);
		do_copy_page(dst, src);
		kunmap_atomic(dst);
		kunmap_atomic(src);
	} else {
		if (PageHighMem(d_page)) {
			/* The page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			safe_copy_page(buffer, s_page);
			dst = kmap_atomic(d_page);
			copy_page(dst, buffer);
			kunmap_atomic(dst);
		} else {
			safe_copy_page(page_address(d_page), s_page);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(zone, pfn)

static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	safe_copy_page(page_address(pfn_to_page(dst_pfn)),
		       pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */

static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_populated_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	for (;;) {
		pfn = memory_bm_next_pfn(orig_bm);
		if (unlikely(pfn == BM_END_OF_MAP))
			break;
		copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	}
}

/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
 * Numbers of normal and highmem page frames allocated for hibernation image
 * before suspending devices.
 */
unsigned int alloc_normal, alloc_highmem;
/*
 * Memory bitmap used for marking saveable pages (during hibernation) or
 * hibernation image pages (during restore)
 */
static struct memory_bitmap orig_bm;
/*
 * Memory bitmap used during hibernation for marking allocated page frames that
 * will contain copies of saveable pages.  During restore it is initially used
 * for marking hibernation image pages, but then the set bits from it are
 * duplicated in @orig_bm and it is released.  On highmem systems it is next
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;

/**
 * swsusp_free - free pages allocated for the suspend.
 *
 * Suspend pages are allocated before the atomic copy is made, so we
 * need to release them after the resume.
 */

void swsusp_free(void)
{
	unsigned long fb_pfn, fr_pfn;

	memory_bm_position_reset(forbidden_pages_map);
	memory_bm_position_reset(free_pages_map);

loop:
	fr_pfn = memory_bm_next_pfn(free_pages_map);
	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);

	/*
	 * Find the next bit set in both bitmaps.  This is guaranteed to
	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
	 */
	do {
		if (fb_pfn < fr_pfn)
			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
		if (fr_pfn < fb_pfn)
			fr_pfn = memory_bm_next_pfn(free_pages_map);
	} while (fb_pfn != fr_pfn);

	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
		struct page *page = pfn_to_page(fr_pfn);

		memory_bm_clear_current(forbidden_pages_map);
		memory_bm_clear_current(free_pages_map);
		__free_page(page);
		goto loop;
	}

	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
	alloc_normal = 0;
	alloc_highmem = 0;
}

/* Helper functions used for the shrinking of memory. */

#define GFP_IMAGE	(GFP_KERNEL | __GFP_NOWARN)

/**
 * preallocate_image_pages - Allocate a number of pages for hibernation image
 * @nr_pages: Number of page frames to allocate.
 * @mask: GFP flags to use for the allocation.
 *
 * Return value: Number of page frames actually allocated
 */
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
	unsigned long nr_alloc = 0;

	while (nr_pages > 0) {
		struct page *page;

		page = alloc_image_page(mask);
		if (!page)
			break;
		memory_bm_set_bit(&copy_bm, page_to_pfn(page));
		if (PageHighMem(page))
			alloc_highmem++;
		else
			alloc_normal++;
		nr_pages--;
		nr_alloc++;
	}

	return nr_alloc;
}

static unsigned long preallocate_image_memory(unsigned long nr_pages,
					      unsigned long avail_normal)
{
	unsigned long alloc;

	if (avail_normal <= alloc_normal)
		return 0;

	alloc = avail_normal - alloc_normal;
	if (nr_pages < alloc)
		alloc = nr_pages;

	return preallocate_image_pages(alloc, GFP_IMAGE);
}

#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}

/**
 * __fraction - Compute (an approximation of) x * (multiplier / base)
 */
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
	x *= multiplier;
	do_div(x, base);
	return (unsigned long)x;
}
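
/*
 * Example (illustrative): __fraction(1000, 1, 3) computes
 * 1000 * 1 / 3 = 333 using 64-bit intermediate arithmetic, so the
 * multiplication cannot overflow for realistic page counts.
 */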

static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
						  unsigned long highmem,
						  unsigned long total)
{
	unsigned long alloc = __fraction(nr_pages, highmem, total);

	return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
	return 0;
}

static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
							 unsigned long highmem,
							 unsigned long total)
{
	return 0;
}
#endif /* CONFIG_HIGHMEM */

/**
 * free_unnecessary_pages - Release preallocated pages not needed for the image
 */
static void free_unnecessary_pages(void)
{
	unsigned long save, to_free_normal, to_free_highmem;

	save = count_data_pages();
	if (alloc_normal >= save) {
		to_free_normal = alloc_normal - save;
		save = 0;
	} else {
		to_free_normal = 0;
		save -= alloc_normal;
	}
	save += count_highmem_pages();
	if (alloc_highmem >= save) {
		to_free_highmem = alloc_highmem - save;
	} else {
		to_free_highmem = 0;
		save -= alloc_highmem;
		if (to_free_normal > save)
			to_free_normal -= save;
		else
			to_free_normal = 0;
	}

	memory_bm_position_reset(&copy_bm);

	while (to_free_normal > 0 || to_free_highmem > 0) {
		unsigned long pfn = memory_bm_next_pfn(&copy_bm);
		struct page *page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (!to_free_highmem)
				continue;
			to_free_highmem--;
			alloc_highmem--;
		} else {
			if (!to_free_normal)
				continue;
			to_free_normal--;
			alloc_normal--;
		}
		memory_bm_clear_bit(&copy_bm, pfn);
		swsusp_unset_page_forbidden(page);
		swsusp_unset_page_free(page);
		__free_page(page);
	}
}

/**
 * minimum_image_size - Estimate the minimum acceptable size of an image
 * @saveable: Number of saveable pages in the system.
 *
 * We want to avoid attempting to free too much memory too hard, so estimate the
 * minimum acceptable size of a hibernation image to use as the lower limit for
 * preallocating memory.
 *
 * We assume that the minimum image size should be proportional to
 *
 * [number of saveable pages] - [number of pages that can be freed in theory]
 *
 * where the second term is the sum of (1) reclaimable slab pages, (2) active
 * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
 * minus mapped file pages.
 */
static unsigned long minimum_image_size(unsigned long saveable)
{
	unsigned long size;

	size = global_page_state(NR_SLAB_RECLAIMABLE)
		+ global_page_state(NR_ACTIVE_ANON)
		+ global_page_state(NR_INACTIVE_ANON)
		+ global_page_state(NR_ACTIVE_FILE)
		+ global_page_state(NR_INACTIVE_FILE)
		- global_page_state(NR_FILE_MAPPED);

	return saveable <= size ? 0 : saveable - size;
}

/**
 * hibernate_preallocate_memory - Preallocate memory for hibernation image
 *
 * To create a hibernation image it is necessary to make a copy of every page
 * frame in use.  We also need a number of page frames to be free during
 * hibernation for allocations made while saving the image and for device
 * drivers, in case they need to allocate memory from their hibernation
 * callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
 * estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
 * /sys/power/reserved_size, respectively).  To make this happen, we compute
 * the total number of available page frames and allocate at least
 *
 * ([page frames total] - PAGES_FOR_IO - [metadata pages]) / 2
 *	- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
 *
 * of them, which corresponds to the maximum size of a hibernation image.
 *
 * If image_size is set below the number following from the above formula,
 * the preallocation of memory is continued until the total number of saveable
 * pages in the system is below the requested image size or the minimum
 * acceptable image size returned by minimum_image_size(), whichever is greater.
 */
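/*
 * Example (illustrative numbers): with count = 2,000,000 usable page
 * frames, size + PAGES_FOR_IO ~ 5,000 pages and a 4 MiB reserved_size
 * (1024 pages with 4 KiB pages), max_size below comes out to
 * (2,000,000 - 5,000) / 2 - 2 * 1024 = 995,452 pages.
 */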
int hibernate_preallocate_memory(void)
{
	struct zone *zone;
	unsigned long saveable, size, max_size, count, highmem, pages = 0;
	unsigned long alloc, save_highmem, pages_highmem, avail_normal;
	struct timeval start, stop;
	int error;

	printk(KERN_INFO "PM: Preallocating image memory... ");
	do_gettimeofday(&start);

	error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
	if (error)
		goto err_out;

	alloc_normal = 0;
	alloc_highmem = 0;

	/* Count the number of saveable data pages. */
	save_highmem = count_highmem_pages();
	saveable = count_data_pages();

	/*
	 * Compute the total number of page frames we can use (count) and the
	 * number of pages needed for image metadata (size).
	 */
	count = saveable;
	saveable += save_highmem;
	highmem = save_highmem;
	size = 0;
	for_each_populated_zone(zone) {
		size += snapshot_additional_pages(zone);
		if (is_highmem(zone))
			highmem += zone_page_state(zone, NR_FREE_PAGES);
		else
			count += zone_page_state(zone, NR_FREE_PAGES);
	}
	avail_normal = count;
	count += highmem;
	count -= totalreserve_pages;

	/* Add number of pages required for page keys (s390 only). */
	size += page_key_additional_pages(saveable);

	/* Compute the maximum number of saveable pages to leave in memory. */
	max_size = (count - (size + PAGES_FOR_IO)) / 2
			- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
	/* Compute the desired number of image pages specified by image_size. */
	size = DIV_ROUND_UP(image_size, PAGE_SIZE);
	if (size > max_size)
		size = max_size;
	/*
	 * If the desired number of image pages is at least as large as the
	 * current number of saveable pages in memory, allocate page frames for
	 * the image and we're done.
	 */
	if (size >= saveable) {
		pages = preallocate_image_highmem(save_highmem);
		pages += preallocate_image_memory(saveable - pages, avail_normal);
		goto out;
	}

	/* Estimate the minimum size of the image. */
	pages = minimum_image_size(saveable);
	/*
	 * To avoid excessive pressure on the normal zone, leave room in it to
	 * accommodate an image of the minimum size (unless it's already too
	 * small, in which case don't preallocate pages from it at all).
	 */
	if (avail_normal > pages)
		avail_normal -= pages;
	else
		avail_normal = 0;
	if (size < pages)
		size = min_t(unsigned long, pages, max_size);

	/*
	 * Let the memory management subsystem know that we're going to need a
	 * large number of page frames to allocate and make it free some memory.
	 * NOTE: If this is not done, performance will be hurt badly in some
	 * test cases.
	 */
	shrink_all_memory(saveable - size);

	/*
	 * The number of saveable pages in memory was too high, so apply some
	 * pressure to decrease it.  First, make room for the largest possible
	 * image and fail if that doesn't work.  Next, try to decrease the size
	 * of the image as much as indicated by 'size' using allocations from
	 * highmem and non-highmem zones separately.
	 */
	pages_highmem = preallocate_image_highmem(highmem / 2);
	alloc = count - max_size;
	if (alloc > pages_highmem)
		alloc -= pages_highmem;
	else
		alloc = 0;
	pages = preallocate_image_memory(alloc, avail_normal);
	if (pages < alloc) {
		/* We have exhausted non-highmem pages, try highmem. */
		alloc -= pages;
		pages += pages_highmem;
		pages_highmem = preallocate_image_highmem(alloc);
		if (pages_highmem < alloc)
			goto err_out;
		pages += pages_highmem;
		/*
		 * size is the desired number of saveable pages to leave in
		 * memory, so try to preallocate (all memory - size) pages.
		 */
		alloc = (count - pages) - size;
		pages += preallocate_image_highmem(alloc);
	} else {
		/*
		 * There are approximately max_size saveable pages at this point
		 * and we want to reduce this number down to size.
		 */
		alloc = max_size - size;
		size = preallocate_highmem_fraction(alloc, highmem, count);
		pages_highmem += size;
		alloc -= size;
		size = preallocate_image_memory(alloc, avail_normal);
		pages_highmem += preallocate_image_highmem(alloc - size);
		pages += pages_highmem + size;
	}

	/*
	 * We only need as many page frames for the image as there are saveable
	 * pages in memory, but we have allocated more.  Release the excessive
	 * ones now.
	 */
	free_unnecessary_pages();

 out:
	do_gettimeofday(&stop);
	printk(KERN_CONT "done (allocated %lu pages)\n", pages);
	swsusp_show_speed(&start, &stop, pages, "Allocated");

	return 0;

 err_out:
	printk(KERN_CONT "\n");
	swsusp_free();
	return -ENOMEM;
}

#ifdef CONFIG_HIGHMEM
/**
 * count_pages_for_highmem - compute the number of non-highmem pages
 * that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * enough_free_mem - Make sure we have enough free memory for the
 * snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = alloc_normal;

	for_each_populated_zone(zone)
		if (!is_highmem(zone))
			free += zone_page_state(zone, NR_FREE_PAGES);

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
		 nr_pages, PAGES_FOR_IO, free);

	return free > nr_pages + PAGES_FOR_IO;
}

#ifdef CONFIG_HIGHMEM
/**
 * get_highmem_buffer - if there are some highmem pages in the suspend
 * image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}

/**
 * alloc_highmem_pages - allocate some highmem pages for the image.
 * Try to allocate as many pages as needed, but if the number of free
 * highmem pages is less than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */

/**
 * swsusp_alloc - allocate memory for the suspend image
 *
 * We first try to allocate as many highmem pages as there are
 * saveable highmem pages in the system.  If that fails, we allocate
 * non-highmem pages for the copies of the remaining highmem ones.
 *
 * In this approach it is likely that the copies of highmem pages will
 * also be located in the high memory, because of the way in which
 * copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
	     unsigned int nr_pages, unsigned int nr_highmem)
{
	if (nr_highmem > 0) {
		if (get_highmem_buffer(PG_ANY))
			goto err_out;
		if (nr_highmem > alloc_highmem) {
			nr_highmem -= alloc_highmem;
			nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
		}
	}
	if (nr_pages > alloc_normal) {
		nr_pages -= alloc_normal;
		while (nr_pages-- > 0) {
			struct page *page;

			page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
			if (!page)
				goto err_out;
			memory_bm_set_bit(copy_bm, page_to_pfn(page));
		}
	}

	return 0;

 err_out:
	swsusp_free();
	return -ENOMEM;
}

asmlinkage __visible int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk(KERN_INFO "PM: Creating hibernation image:\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "PM: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* While allocating the suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages(NULL);
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This especially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
		nr_pages);

	return 0;
}
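
/*
 * Example (illustrative, 64-bit, 4 KiB pages): copying 100,000 pages
 * needs nr_meta_pages = DIV_ROUND_UP(100000 * 8, 4096) = 196 pages to
 * store the original pfns of the image pages.
 */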

#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->version_code = LINUX_VERSION_CODE;
	return 0;
}

static char *check_image_kernel(struct swsusp_info *info)
{
	if (info->version_code != LINUX_VERSION_CODE)
		return "kernel version";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		return "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		return "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		return "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		return "machine";
	return NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

unsigned long snapshot_get_image_size(void)
{
	return nr_copy_pages + nr_meta_pages + 1;
}

static int init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->num_physpages = get_num_physpages();
	info->image_pages = nr_copy_pages;
	info->pages = snapshot_get_image_size();
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
	return init_header_complete(info);
}

/**
 * pack_pfns - store, in the array @buf, the pfns corresponding to the
 * set bits found in the bitmap @bm (one page worth of pfns at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
		/* Save page key for data page (s390 only). */
		page_key_read(buf + j);
	}
}
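
/*
 * Example (illustrative, 64-bit, 4 KiB pages): each metadata page holds
 * PAGE_SIZE / sizeof(long) = 512 pfns, which is why snapshot_read_next()
 * below emits nr_meta_pages of these before the data pages themselves.
 */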
1930
1931 /**
1932 * snapshot_read_next - used for reading the system memory snapshot.
1933 *
1934 * On the first call @handle should point to a zeroed snapshot_handle
1935 * structure. The structure is updated by each call and the same
1936 * pointer should be passed to this function on every subsequent call.
1937 *
1938 * On success the function returns a positive number. The caller is
1939 * then allowed to read up to the returned number of bytes from the
1940 * memory location computed by the data_of() macro.
1941 *
1942 * The function returns 0 to indicate the end of the data stream
1943 * condition, and a negative number on error. In the latter case the
1944 * structure pointed to by @handle is not updated and must not be
1945 * used any more.
1946 */
1947
1948 int snapshot_read_next(struct snapshot_handle *handle)
1949 {
1950 if (handle->cur > nr_meta_pages + nr_copy_pages)
1951 return 0;
1952
1953 if (!buffer) {
1954 /* This makes the buffer be freed by swsusp_free() */
1955 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
1956 if (!buffer)
1957 return -ENOMEM;
1958 }
1959 if (!handle->cur) {
1960 int error;
1961
1962 error = init_header((struct swsusp_info *)buffer);
1963 if (error)
1964 return error;
1965 handle->buffer = buffer;
1966 memory_bm_position_reset(&orig_bm);
1967 memory_bm_position_reset(&copy_bm);
1968 } else if (handle->cur <= nr_meta_pages) {
1969 clear_page(buffer);
1970 pack_pfns(buffer, &orig_bm);
1971 } else {
1972 struct page *page;
1973
1974 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
1975 if (PageHighMem(page)) {
1976 /* Highmem pages are copied to the buffer,
1977 * because we can't return with a kmapped
1978 * highmem page (we may not be called again).
1979 */
1980 void *kaddr;
1981
1982 kaddr = kmap_atomic(page);
1983 copy_page(buffer, kaddr);
1984 kunmap_atomic(kaddr);
1985 handle->buffer = buffer;
1986 } else {
1987 handle->buffer = page_address(page);
1988 }
1989 }
1990 handle->cur++;
1991 return PAGE_SIZE;
1992 }
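/*
 * Editorial sketch (hypothetical usage, not code from this file): a
 * consumer of the snapshot might drain it as follows, with
 * transfer_block() standing in for whatever writes the data out:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_read_next(&handle)) > 0)
 *		transfer_block(data_of(handle), n);
 *	if (n < 0)
 *		return n;	(n == 0 means the whole image was read)
 */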
1993
1994 /**
1995 * mark_unsafe_pages - mark the pages that cannot be used for storing
1996 * the image during resume, because they conflict with the pages that
1997 * were in use before the suspend
1998 */
1999
2000 static int mark_unsafe_pages(struct memory_bitmap *bm)
2001 {
2002 struct zone *zone;
2003 unsigned long pfn, max_zone_pfn;
2004
2005 /* Clear page flags */
2006 for_each_populated_zone(zone) {
2007 max_zone_pfn = zone_end_pfn(zone);
2008 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
2009 if (pfn_valid(pfn))
2010 swsusp_unset_page_free(pfn_to_page(pfn));
2011 }
2012
2013 /* Mark pages that correspond to the "original" pfns as "unsafe" */
2014 memory_bm_position_reset(bm);
2015 do {
2016 pfn = memory_bm_next_pfn(bm);
2017 if (likely(pfn != BM_END_OF_MAP)) {
2018 if (likely(pfn_valid(pfn)))
2019 swsusp_set_page_free(pfn_to_page(pfn));
2020 else
2021 return -EFAULT;
2022 }
2023 } while (pfn != BM_END_OF_MAP);
2024
2025 allocated_unsafe_pages = 0;
2026
2027 return 0;
2028 }
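/*
 * Editorial note: after this pass the "unsafe" page frames (those that
 * will receive image data on restore) are exactly the ones for which
 * swsusp_page_is_free() returns true, which is the test that
 * get_image_page() applies when asked for a PG_SAFE allocation, so
 * subsequent "safe" allocations will avoid them.
 */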
2029
2030 static void
2031 duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
2032 {
2033 unsigned long pfn;
2034
2035 memory_bm_position_reset(src);
2036 pfn = memory_bm_next_pfn(src);
2037 while (pfn != BM_END_OF_MAP) {
2038 memory_bm_set_bit(dst, pfn);
2039 pfn = memory_bm_next_pfn(src);
2040 }
2041 }
2042
2043 static int check_header(struct swsusp_info *info)
2044 {
2045 char *reason;
2046
2047 reason = check_image_kernel(info);
2048 if (!reason && info->num_physpages != get_num_physpages())
2049 reason = "memory size";
2050 if (reason) {
2051 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2052 return -EPERM;
2053 }
2054 return 0;
2055 }
2056
2057 /**
2058 * load_header - check the image header and copy the data from it
2059 */
2060
2061 static int
2062 load_header(struct swsusp_info *info)
2063 {
2064 int error;
2065
2066 restore_pblist = NULL;
2067 error = check_header(info);
2068 if (!error) {
2069 nr_copy_pages = info->image_pages;
2070 nr_meta_pages = info->pages - info->image_pages - 1;
2071 }
2072 return error;
2073 }
2074
2075 /**
2076 * unpack_orig_pfns - for each valid pfn in @buf (one page worth at a
2077 * time), set the corresponding bit in the memory bitmap @bm
2078 */
2079 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2080 {
2081 int j;
2082
2083 for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2084 if (unlikely(buf[j] == BM_END_OF_MAP))
2085 break;
2086
2087 /* Extract and buffer page key for data page (s390 only). */
2088 page_key_memorize(buf + j);
2089
2090 if (memory_bm_pfn_present(bm, buf[j]))
2091 memory_bm_set_bit(bm, buf[j]);
2092 else
2093 return -EFAULT;
2094 }
2095
2096 return 0;
2097 }
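/*
 * Editorial note: unpack_orig_pfns() is the restore-side inverse of
 * pack_pfns() above; the save path emits one page of pfns at a time
 * via pack_pfns(buffer, &orig_bm) and the restore path replays it via
 * unpack_orig_pfns(buffer, &copy_bm) in snapshot_write_next().
 */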
2098
2099 /* List of "safe" pages that may be used to store data loaded from the suspend
2100 * image
2101 */
2102 static struct linked_page *safe_pages_list;
2103
2104 #ifdef CONFIG_HIGHMEM
2105 /* struct highmem_pbe is used for creating the list of highmem pages that
2106 * should be restored atomically during the resume from disk, because the page
2107 * frames they have occupied before the suspend are in use.
2108 */
2109 struct highmem_pbe {
2110 struct page *copy_page; /* data is here now */
2111 struct page *orig_page; /* data was here before the suspend */
2112 struct highmem_pbe *next;
2113 };
2114
2115 /* List of highmem PBEs needed for restoring the highmem pages that were
2116 * allocated before the suspend and included in the suspend image, but have
2117 * also been allocated by the "resume" kernel, so their contents cannot be
2118 * written directly to their "original" page frames.
2119 */
2120 static struct highmem_pbe *highmem_pblist;
2121
2122 /**
2123 * count_highmem_image_pages - compute the number of highmem pages in the
2124 * suspend image. The bits in the memory bitmap @bm that correspond to the
2125 * image pages are assumed to be set.
2126 */
2127
2128 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2129 {
2130 unsigned long pfn;
2131 unsigned int cnt = 0;
2132
2133 memory_bm_position_reset(bm);
2134 pfn = memory_bm_next_pfn(bm);
2135 while (pfn != BM_END_OF_MAP) {
2136 if (PageHighMem(pfn_to_page(pfn)))
2137 cnt++;
2138
2139 pfn = memory_bm_next_pfn(bm);
2140 }
2141 return cnt;
2142 }
2143
2144 /**
2145 * prepare_highmem_image - try to allocate as many highmem pages as
2146 * there are highmem image pages (@nr_highmem_p points to the variable
2147 * containing the number of highmem image pages). The pages that are
2148 * "safe" (ie. will not be overwritten when the suspend image is
2149 * restored) have the corresponding bits set in @bm (it must be
2150 * uninitialized).
2151 *
2152 * NOTE: This function should not be called if there are no highmem
2153 * image pages.
2154 */
2155
2156 static unsigned int safe_highmem_pages;
2157
2158 static struct memory_bitmap *safe_highmem_bm;
2159
2160 static int
2161 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2162 {
2163 unsigned int to_alloc;
2164
2165 if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2166 return -ENOMEM;
2167
2168 if (get_highmem_buffer(PG_SAFE))
2169 return -ENOMEM;
2170
2171 to_alloc = count_free_highmem_pages();
2172 if (to_alloc > *nr_highmem_p)
2173 to_alloc = *nr_highmem_p;
2174 else
2175 *nr_highmem_p = to_alloc;
2176
2177 safe_highmem_pages = 0;
2178 while (to_alloc-- > 0) {
2179 struct page *page;
2180
2181 page = alloc_page(__GFP_HIGHMEM);
2182 if (!swsusp_page_is_free(page)) {
2183 /* The page is "safe", set its bit in the bitmap */
2184 memory_bm_set_bit(bm, page_to_pfn(page));
2185 safe_highmem_pages++;
2186 }
2187 /* Mark the page as allocated */
2188 swsusp_set_page_forbidden(page);
2189 swsusp_set_page_free(page);
2190 }
2191 memory_bm_position_reset(bm);
2192 safe_highmem_bm = bm;
2193 return 0;
2194 }
2195
2196 /**
2197 * get_highmem_page_buffer - for a given highmem image page, find the
2198 * buffer that snapshot_write_next() should set for its caller to write to.
2199 *
2200 * If the page is to be saved to its "original" page frame, or a copy of
2201 * the page is to be made in highmem, @buffer is returned. Otherwise,
2202 * the copy of the page is to be made in normal memory, so the address of
2203 * the copy is returned.
2204 *
2205 * If @buffer is returned, the caller of snapshot_write_next() will write
2206 * the page's contents to @buffer, so they will have to be copied to the
2207 * right location on the next call to snapshot_write_next(), which is done
2208 * with the help of copy_last_highmem_page(). For this purpose, if
2209 * @buffer is returned, @last_highmem_page is set to the page to which
2210 * the data will have to be copied from @buffer.
2211 */
2212
2213 static struct page *last_highmem_page;
2214
2215 static void *
2216 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2217 {
2218 struct highmem_pbe *pbe;
2219 void *kaddr;
2220
2221 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2222 /* We have allocated the "original" page frame and we can
2223 * use it directly to store the loaded page.
2224 */
2225 last_highmem_page = page;
2226 return buffer;
2227 }
2228 /* The "original" page frame has not been allocated and we have to
2229 * use a "safe" page frame to store the loaded page.
2230 */
2231 pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2232 if (!pbe) {
2233 swsusp_free();
2234 return ERR_PTR(-ENOMEM);
2235 }
2236 pbe->orig_page = page;
2237 if (safe_highmem_pages > 0) {
2238 struct page *tmp;
2239
2240 /* Copy of the page will be stored in high memory */
2241 kaddr = buffer;
2242 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2243 safe_highmem_pages--;
2244 last_highmem_page = tmp;
2245 pbe->copy_page = tmp;
2246 } else {
2247 /* Copy of the page will be stored in normal memory */
2248 kaddr = safe_pages_list;
2249 safe_pages_list = safe_pages_list->next;
2250 pbe->copy_page = virt_to_page(kaddr);
2251 }
2252 pbe->next = highmem_pblist;
2253 highmem_pblist = pbe;
2254 return kaddr;
2255 }
2256
2257 /**
2258 * copy_last_highmem_page - copy the contents of a highmem image page
2259 * from @buffer, where the caller of snapshot_write_next() has placed
2260 * them, to the right location represented by @last_highmem_page.
2261 */
2262
2263 static void copy_last_highmem_page(void)
2264 {
2265 if (last_highmem_page) {
2266 void *dst;
2267
2268 dst = kmap_atomic(last_highmem_page);
2269 copy_page(dst, buffer);
2270 kunmap_atomic(dst);
2271 last_highmem_page = NULL;
2272 }
2273 }
2274
2275 static inline int last_highmem_page_copied(void)
2276 {
2277 return !last_highmem_page;
2278 }
2279
2280 static inline void free_highmem_data(void)
2281 {
2282 if (safe_highmem_bm)
2283 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2284
2285 if (buffer)
2286 free_image_page(buffer, PG_UNSAFE_CLEAR);
2287 }
2288 #else
2289 static inline int get_safe_write_buffer(void) { return 0; }
2290
2291 static unsigned int
2292 count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2293
2294 static inline int
2295 prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
2296 {
2297 return 0;
2298 }
2299
2300 static inline void *
2301 get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
2302 {
2303 return ERR_PTR(-EINVAL);
2304 }
2305
2306 static inline void copy_last_highmem_page(void) {}
2307 static inline int last_highmem_page_copied(void) { return 1; }
2308 static inline void free_highmem_data(void) {}
2309 #endif /* CONFIG_HIGHMEM */
2310
2311 /**
2312 * prepare_image - use the memory bitmap @bm to mark the pages that will
2313 * be overwritten in the process of restoring the system memory state
2314 * from the suspend image ("unsafe" pages) and allocate memory for the
2315 * image.
2316 *
2317 * The idea is to allocate a new memory bitmap first and then allocate
2318 * as many pages as needed for the image data, but not to assign these
2319 * pages to specific tasks initially. Instead, we just mark them as
2320 * allocated and create a list of "safe" pages that will be used
2321 * later. On systems with high memory a list of "safe" highmem pages is
2322 * also created.
2323 */
2324
2325 #define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
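/*
 * Editorial note (illustrative figures, assuming a 64-bit system with
 * 4 KiB pages): LINKED_PAGE_DATA_SIZE is PAGE_SIZE - sizeof(void *) =
 * 4088 bytes and struct pbe holds three pointers (24 bytes), so each
 * linked page carries about 170 PBEs.
 */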
2326
2327 static int
2328 prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2329 {
2330 unsigned int nr_pages, nr_highmem;
2331 struct linked_page *sp_list, *lp;
2332 int error;
2333
2334 /* If there is no highmem, the buffer will not be necessary */
2335 free_image_page(buffer, PG_UNSAFE_CLEAR);
2336 buffer = NULL;
2337
2338 nr_highmem = count_highmem_image_pages(bm);
2339 error = mark_unsafe_pages(bm);
2340 if (error)
2341 goto Free;
2342
2343 error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2344 if (error)
2345 goto Free;
2346
2347 duplicate_memory_bitmap(new_bm, bm);
2348 memory_bm_free(bm, PG_UNSAFE_KEEP);
2349 if (nr_highmem > 0) {
2350 error = prepare_highmem_image(bm, &nr_highmem);
2351 if (error)
2352 goto Free;
2353 }
2354 /* Reserve some safe pages for potential later use.
2355 *
2356 * NOTE: This way we make sure there will be enough safe pages for the
2357 * chain_alloc() in get_buffer(). It is a bit wasteful, but
2358 * nr_copy_pages cannot be greater than 50% of the memory anyway.
2359 */
2360 sp_list = NULL;
2361 /* nr_copy_pages cannot be less than allocated_unsafe_pages */
2362 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2363 nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2364 while (nr_pages > 0) {
2365 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2366 if (!lp) {
2367 error = -ENOMEM;
2368 goto Free;
2369 }
2370 lp->next = sp_list;
2371 sp_list = lp;
2372 nr_pages--;
2373 }
2374 /* Preallocate memory for the image */
2375 safe_pages_list = NULL;
2376 nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2377 while (nr_pages > 0) {
2378 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2379 if (!lp) {
2380 error = -ENOMEM;
2381 goto Free;
2382 }
2383 if (!swsusp_page_is_free(virt_to_page(lp))) {
2384 /* The page is "safe", add it to the list */
2385 lp->next = safe_pages_list;
2386 safe_pages_list = lp;
2387 }
2388 /* Mark the page as allocated */
2389 swsusp_set_page_forbidden(virt_to_page(lp));
2390 swsusp_set_page_free(virt_to_page(lp));
2391 nr_pages--;
2392 }
2393 /* Free the reserved safe pages so that chain_alloc() can use them */
2394 while (sp_list) {
2395 lp = sp_list->next;
2396 free_image_page(sp_list, PG_UNSAFE_CLEAR);
2397 sp_list = lp;
2398 }
2399 return 0;
2400
2401 Free:
2402 swsusp_free();
2403 return error;
2404 }
2405
2406 /**
2407 * get_buffer - compute the address that snapshot_write_next() should
2408 * set for its caller to write to.
2409 */
2410
2411 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2412 {
2413 struct pbe *pbe;
2414 struct page *page;
2415 unsigned long pfn = memory_bm_next_pfn(bm);
2416
2417 if (pfn == BM_END_OF_MAP)
2418 return ERR_PTR(-EFAULT);
2419
2420 page = pfn_to_page(pfn);
2421 if (PageHighMem(page))
2422 return get_highmem_page_buffer(page, ca);
2423
2424 if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2425 /* We have allocated the "original" page frame and we can
2426 * use it directly to store the loaded page.
2427 */
2428 return page_address(page);
2429
2430 /* The "original" page frame has not been allocated and we have to
2431 * use a "safe" page frame to store the loaded page.
2432 */
2433 pbe = chain_alloc(ca, sizeof(struct pbe));
2434 if (!pbe) {
2435 swsusp_free();
2436 return ERR_PTR(-ENOMEM);
2437 }
2438 pbe->orig_address = page_address(page);
2439 pbe->address = safe_pages_list;
2440 safe_pages_list = safe_pages_list->next;
2441 pbe->next = restore_pblist;
2442 restore_pblist = pbe;
2443 return pbe->address;
2444 }
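/*
 * Editorial note (an assumption about code outside this file): the
 * restore_pblist built up here is later walked by the architecture's
 * swsusp_arch_resume(), which copies each pbe->address page back to
 * its pbe->orig_address frame, restoring the image's original contents.
 */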
2445
2446 /**
2447 * snapshot_write_next - used for writing the system memory snapshot.
2448 *
2449 * On the first call @handle should point to a zeroed snapshot_handle
2450 * structure. The structure is updated by each call and the same
2451 * pointer should be passed to this function on every subsequent call.
2452 *
2453 * On success the function returns a positive number. The caller is
2454 * then allowed to write up to the returned number of bytes to the
2455 * memory location computed by the data_of() macro.
2456 *
2457 * The function returns 0 to indicate the "end of file" condition,
2458 * and a negative number on error. In the latter case the structure
2459 * pointed to by @handle is not updated and must not be used any
2460 * more.
2461 */
2462
2463 int snapshot_write_next(struct snapshot_handle *handle)
2464 {
2465 static struct chain_allocator ca;
2466 int error = 0;
2467
2468 /* Check if we have already loaded the entire image */
2469 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2470 return 0;
2471
2472 handle->sync_read = 1;
2473
2474 if (!handle->cur) {
2475 if (!buffer)
2476 /* This makes the buffer be freed by swsusp_free() */
2477 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2478
2479 if (!buffer)
2480 return -ENOMEM;
2481
2482 handle->buffer = buffer;
2483 } else if (handle->cur == 1) {
2484 error = load_header(buffer);
2485 if (error)
2486 return error;
2487
2488 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2489 if (error)
2490 return error;
2491
2492 /* Allocate buffer for page keys. */
2493 error = page_key_alloc(nr_copy_pages);
2494 if (error)
2495 return error;
2496
2497 } else if (handle->cur <= nr_meta_pages + 1) {
2498 error = unpack_orig_pfns(buffer, &copy_bm);
2499 if (error)
2500 return error;
2501
2502 if (handle->cur == nr_meta_pages + 1) {
2503 error = prepare_image(&orig_bm, &copy_bm);
2504 if (error)
2505 return error;
2506
2507 chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2508 memory_bm_position_reset(&orig_bm);
2509 restore_pblist = NULL;
2510 handle->buffer = get_buffer(&orig_bm, &ca);
2511 handle->sync_read = 0;
2512 if (IS_ERR(handle->buffer))
2513 return PTR_ERR(handle->buffer);
2514 }
2515 } else {
2516 copy_last_highmem_page();
2517 /* Restore page key for data page (s390 only). */
2518 page_key_write(handle->buffer);
2519 handle->buffer = get_buffer(&orig_bm, &ca);
2520 if (IS_ERR(handle->buffer))
2521 return PTR_ERR(handle->buffer);
2522 if (handle->buffer != buffer)
2523 handle->sync_read = 0;
2524 }
2525 handle->cur++;
2526 return PAGE_SIZE;
2527 }
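/*
 * Editorial sketch (hypothetical usage, not code from this file): the
 * restore path might feed the image back as follows, with fetch_block()
 * standing in for whatever reads the data from storage:
 *
 *	struct snapshot_handle handle;
 *	int n;
 *
 *	memset(&handle, 0, sizeof(handle));
 *	while ((n = snapshot_write_next(&handle)) > 0) {
 *		if (fetch_block(data_of(handle), n))
 *			break;
 *	}
 *	snapshot_write_finalize(&handle);
 *	if (n < 0 || !snapshot_image_loaded(&handle))
 *		the image is not usable
 */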
2528
2529 /**
2530 * snapshot_write_finalize - must be called after the last call to
2531 * snapshot_write_next() in case the last page in the image happens
2532 * to be a highmem page whose contents should be stored in highmem.
2533 * Additionally, it releases the memory that will not be used any
2534 * more.
2535 */
2536
2537 void snapshot_write_finalize(struct snapshot_handle *handle)
2538 {
2539 copy_last_highmem_page();
2540 /* Restore page key for data page (s390 only). */
2541 page_key_write(handle->buffer);
2542 page_key_free();
2543 /* Free only if we have loaded the image entirely */
2544 if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2545 memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
2546 free_highmem_data();
2547 }
2548 }
2549
2550 int snapshot_image_loaded(struct snapshot_handle *handle)
2551 {
2552 return !(!nr_copy_pages || !last_highmem_page_copied() ||
2553 handle->cur <= nr_meta_pages + nr_copy_pages);
2554 }
2555
2556 #ifdef CONFIG_HIGHMEM
2557 /* Assumes that @buf is ready and points to a "safe" page */
2558 static inline void
2559 swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2560 {
2561 void *kaddr1, *kaddr2;
2562
2563 kaddr1 = kmap_atomic(p1);
2564 kaddr2 = kmap_atomic(p2);
2565 copy_page(buf, kaddr1);
2566 copy_page(kaddr1, kaddr2);
2567 copy_page(kaddr2, buf);
2568 kunmap_atomic(kaddr2);
2569 kunmap_atomic(kaddr1);
2570 }
2571
2572 /**
2573 * restore_highmem - for each highmem page that was allocated before
2574 * the suspend and included in the suspend image, and has also been
2575 * allocated by the "resume" kernel, swap its current (ie. "before
2576 * resume") contents with the previous (ie. "before suspend") ones.
2577 *
2578 * If the resume eventually fails, we can call this function once
2579 * again and restore the "before resume" highmem state.
2580 */
2581
2582 int restore_highmem(void)
2583 {
2584 struct highmem_pbe *pbe = highmem_pblist;
2585 void *buf;
2586
2587 if (!pbe)
2588 return 0;
2589
2590 buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2591 if (!buf)
2592 return -ENOMEM;
2593
2594 while (pbe) {
2595 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2596 pbe = pbe->next;
2597 }
2598 free_image_page(buf, PG_UNSAFE_CLEAR);
2599 return 0;
2600 }
2601 #endif /* CONFIG_HIGHMEM */