/*
 * linux/kernel/power/snapshot.c
 *
 * This file provides system snapshot/restore functionality for swsusp.
 *
 * Copyright (C) 1998-2005 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * This file is released under the GPLv2.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/smp_lock.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#include "power.h"
/* List of PBEs needed for restoring the pages that were allocated before
 * the suspend and included in the suspend image, but have also been
 * allocated by the "resume" kernel, so their contents cannot be written
 * directly to their "original" page frames.
 */
struct pbe *restore_pblist;

/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
 *	@safe_needed - on resume, for storing the PBE list and the image,
 *	we can only use memory pages that do not conflict with the pages
 *	used before suspend.  The unsafe pages have PageNosaveFree set
 *	and we count them using allocated_unsafe_pages.
 *
 *	Each allocated image page is marked as PageNosave and PageNosaveFree
 *	so that swsusp_free() can release it.
 */

#define PG_ANY		0
#define PG_SAFE		1
#define PG_UNSAFE_CLEAR	1
#define PG_UNSAFE_KEEP	0
static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
	void *res;

	res = (void *)get_zeroed_page(gfp_mask);
	if (safe_needed)
		while (res && PageNosaveFree(virt_to_page(res))) {
			/* The page is unsafe, mark it for swsusp_free() */
			SetPageNosave(virt_to_page(res));
			allocated_unsafe_pages++;
			res = (void *)get_zeroed_page(gfp_mask);
		}
	if (res) {
		SetPageNosave(virt_to_page(res));
		SetPageNosaveFree(virt_to_page(res));
	}
	return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
	return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_page(gfp_mask);
	if (page) {
		SetPageNosave(page);
		SetPageNosaveFree(page);
	}
	return page;
}
/**
 *	free_image_page - free page represented by @addr, allocated with
 *	get_image_page (page flags set by it must be cleared)
 */

static inline void free_image_page(void *addr, int clear_nosave_free)
{
	struct page *page;

	BUG_ON(!virt_addr_valid(addr));

	page = virt_to_page(addr);

	ClearPageNosave(page);
	if (clear_nosave_free)
		ClearPageNosaveFree(page);

	__free_page(page);
}
/* struct linked_page is used to build chains of pages */

#define LINKED_PAGE_DATA_SIZE	(PAGE_SIZE - sizeof(void *))

struct linked_page {
	struct linked_page *next;
	char data[LINKED_PAGE_DATA_SIZE];
} __attribute__((packed));
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
	while (list) {
		struct linked_page *lp = list->next;

		free_image_page(list, clear_page_nosave);
		list = lp;
	}
}
/**
 *	struct chain_allocator is used for allocating small objects out of
 *	a linked list of pages called 'the chain'.
 *
 *	The chain grows each time when there is no room for a new object in
 *	the current page.  The allocated objects cannot be freed individually.
 *	It is only possible to free them all at once, by freeing the entire
 *	chain.
 *
 *	NOTE: The chain allocator may be inefficient if the allocated objects
 *	are not much smaller than PAGE_SIZE.
 */

struct chain_allocator {
	struct linked_page *chain;	/* the chain */
	unsigned int used_space;	/* total size of objects allocated out
					 * of the current page
					 */
	gfp_t gfp_mask;		/* mask for allocating pages */
	int safe_needed;	/* if set, only "safe" pages are allocated */
};
static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
	ca->chain = NULL;
	ca->used_space = LINKED_PAGE_DATA_SIZE;
	ca->gfp_mask = gfp_mask;
	ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
	void *ret;

	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
		struct linked_page *lp;

		lp = get_image_page(ca->gfp_mask, ca->safe_needed);
		if (!lp)
			return NULL;

		lp->next = ca->chain;
		ca->chain = lp;
		ca->used_space = 0;
	}
	ret = ca->chain->data + ca->used_space;
	ca->used_space += size;
	return ret;
}
static void chain_free(struct chain_allocator *ca, int clear_page_nosave)
{
	free_list_of_pages(ca->chain, clear_page_nosave);
	memset(ca, 0, sizeof(struct chain_allocator));
}
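
/*
 * Illustrative sketch (not part of the build): typical use of the chain
 * allocator defined above.  struct pbe is used here only as an example of
 * a small object; the function name chain_alloc_example is hypothetical.
 */
#if 0
static int chain_alloc_example(void)
{
	struct chain_allocator ca;
	struct pbe *p;

	chain_init(&ca, GFP_KERNEL, PG_ANY);
	p = chain_alloc(&ca, sizeof(struct pbe));
	if (!p)
		return -ENOMEM;
	/* ... use p; objects from the chain cannot be freed individually ... */
	chain_free(&ca, PG_UNSAFE_CLEAR);
	return 0;
}
#endif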
/**
 *	Data types related to memory bitmaps.
 *
 *	Memory bitmap is a structure consisting of many linked lists of
 *	objects.  The main list's elements are of type struct zone_bitmap
 *	and each of them corresponds to one zone.  For each zone bitmap
 *	object there is a list of objects of type struct bm_block that
 *	represent each block of bit chunks in which information is
 *	stored.
 *
 *	struct memory_bitmap contains a pointer to the main list of zone
 *	bitmap objects, a struct bm_position used for browsing the bitmap,
 *	and a pointer to the list of pages used for allocating all of the
 *	zone bitmap objects and bitmap block objects.
 *
 *	NOTE: It has to be possible to lay out the bitmap in memory
 *	using only allocations of order 0.  Additionally, the bitmap is
 *	designed to work with an arbitrary number of zones (this is over the
 *	top for now, but let's avoid making unnecessary assumptions ;-).
 *
 *	struct zone_bitmap contains a pointer to a list of bitmap block
 *	objects and a pointer to the bitmap block object that has been
 *	most recently used for setting bits.  Additionally, it contains the
 *	pfns that correspond to the start and end of the represented zone.
 *
 *	struct bm_block contains a pointer to the memory page in which
 *	information is stored (in the form of a block of bit chunks
 *	of type unsigned long each).  It also contains the pfns that
 *	correspond to the start and end of the represented memory area and
 *	the number of bit chunks in the block.
 *
 *	NOTE: Memory bitmaps are used for two types of operations only:
 *	"set a bit" and "find the next bit set".  Moreover, the searching
 *	is always carried out after all of the "set a bit" operations
 *	on a particular bitmap have been done.
 */

#define BM_END_OF_MAP	(~0UL)

#define BM_CHUNKS_PER_BLOCK	(PAGE_SIZE / sizeof(long))
#define BM_BITS_PER_CHUNK	(sizeof(long) << 3)
#define BM_BITS_PER_BLOCK	(PAGE_SIZE << 3)
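
/* Worked example (illustrative, assuming 4 KB pages and 64-bit longs):
 * BM_CHUNKS_PER_BLOCK = 4096 / 8 = 512 chunks per block,
 * BM_BITS_PER_CHUNK = 8 * 8 = 64 bits per chunk and
 * BM_BITS_PER_BLOCK = 4096 * 8 = 32768 bits, so a single bm_block page
 * covers 32768 page frames, i.e. 128 MB of memory.
 */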
struct bm_block {
	struct bm_block *next;		/* next element of the list */
	unsigned long start_pfn;	/* pfn represented by the first bit */
	unsigned long end_pfn;	/* pfn represented by the last bit plus 1 */
	unsigned int size;	/* number of bit chunks */
	unsigned long *data;	/* chunks of bits representing pages */
};
struct zone_bitmap {
	struct zone_bitmap *next;	/* next element of the list */
	unsigned long start_pfn;	/* minimal pfn in this zone */
	unsigned long end_pfn;		/* maximal pfn in this zone plus 1 */
	struct bm_block *bm_blocks;	/* list of bitmap blocks */
	struct bm_block *cur_block;	/* recently used bitmap block */
};
/* struct bm_position is used for browsing memory bitmaps */

struct bm_position {
	struct zone_bitmap *zone_bm;
	struct bm_block *block;
	int chunk;
	int bit;
};
struct memory_bitmap {
	struct zone_bitmap *zone_bm_list;	/* list of zone bitmaps */
	struct linked_page *p_list;	/* list of pages used to store zone
					 * bitmap objects and bitmap block
					 * objects
					 */
	struct bm_position cur;	/* most recently used bit position */
};
/* Functions that operate on memory bitmaps */

static inline void memory_bm_reset_chunk(struct memory_bitmap *bm)
{
	bm->cur.chunk = 0;
	bm->cur.bit = -1;
}

static void memory_bm_position_reset(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;

	zone_bm = bm->zone_bm_list;
	bm->cur.zone_bm = zone_bm;
	bm->cur.block = zone_bm->bm_blocks;
	memory_bm_reset_chunk(bm);
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
 *	create_bm_block_list - create a list of block bitmap objects
 */

static inline struct bm_block *
create_bm_block_list(unsigned int nr_blocks, struct chain_allocator *ca)
{
	struct bm_block *bblist = NULL;

	while (nr_blocks-- > 0) {
		struct bm_block *bb;

		bb = chain_alloc(ca, sizeof(struct bm_block));
		if (!bb)
			return NULL;

		bb->next = bblist;
		bblist = bb;
	}
	return bblist;
}
/**
 *	create_zone_bm_list - create a list of zone bitmap objects
 */

static inline struct zone_bitmap *
create_zone_bm_list(unsigned int nr_zones, struct chain_allocator *ca)
{
	struct zone_bitmap *zbmlist = NULL;

	while (nr_zones-- > 0) {
		struct zone_bitmap *zbm;

		zbm = chain_alloc(ca, sizeof(struct zone_bitmap));
		if (!zbm)
			return NULL;

		zbm->next = zbmlist;
		zbmlist = zbm;
	}
	return zbmlist;
}
/**
 *	memory_bm_create - allocate memory for a memory bitmap
 */

static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
	struct chain_allocator ca;
	struct zone *zone;
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	unsigned int nr;

	chain_init(&ca, gfp_mask, safe_needed);

	/* Compute the number of zones */
	nr = 0;
	for_each_zone(zone)
		if (populated_zone(zone))
			nr++;

	/* Allocate the list of zones bitmap objects */
	zone_bm = create_zone_bm_list(nr, &ca);
	bm->zone_bm_list = zone_bm;
	if (!zone_bm) {
		chain_free(&ca, PG_UNSAFE_CLEAR);
		return -ENOMEM;
	}

	/* Initialize the zone bitmap objects */
	for_each_zone(zone) {
		unsigned long pfn;

		if (!populated_zone(zone))
			continue;

		zone_bm->start_pfn = zone->zone_start_pfn;
		zone_bm->end_pfn = zone->zone_start_pfn + zone->spanned_pages;
		/* Allocate the list of bitmap block objects */
		nr = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
		bb = create_bm_block_list(nr, &ca);
		zone_bm->bm_blocks = bb;
		zone_bm->cur_block = bb;
		if (!bb)
			goto Free;

		nr = zone->spanned_pages;
		pfn = zone->zone_start_pfn;
		/* Initialize the bitmap block objects */
		while (bb) {
			unsigned long *ptr;

			ptr = get_image_page(gfp_mask, safe_needed);
			bb->data = ptr;
			if (!ptr)
				goto Free;

			bb->start_pfn = pfn;
			if (nr >= BM_BITS_PER_BLOCK) {
				pfn += BM_BITS_PER_BLOCK;
				bb->size = BM_CHUNKS_PER_BLOCK;
				nr -= BM_BITS_PER_BLOCK;
			} else {
				/* This is executed only once in the loop */
				pfn += nr;
				bb->size = DIV_ROUND_UP(nr, BM_BITS_PER_CHUNK);
			}
			bb->end_pfn = pfn;
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	bm->p_list = ca.chain;
	memory_bm_position_reset(bm);
	return 0;

 Free:
	bm->p_list = ca.chain;
	memory_bm_free(bm, PG_UNSAFE_CLEAR);
	return -ENOMEM;
}
/**
 *	memory_bm_free - free memory occupied by the memory bitmap @bm
 */

static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
	struct zone_bitmap *zone_bm;

	/* Free the list of bit blocks for each zone_bitmap object */
	zone_bm = bm->zone_bm_list;
	while (zone_bm) {
		struct bm_block *bb;

		bb = zone_bm->bm_blocks;
		while (bb) {
			if (bb->data)
				free_image_page(bb->data, clear_nosave_free);
			bb = bb->next;
		}
		zone_bm = zone_bm->next;
	}
	free_list_of_pages(bm->p_list, clear_nosave_free);
	bm->zone_bm_list = NULL;
}
/**
 *	memory_bm_set_bit - set the bit in the bitmap @bm that corresponds
 *	to given pfn.  The cur_zone_bm member of @bm and the cur_block member
 *	of @bm->cur_zone_bm are updated.
 *
 *	If the bit cannot be set, the function returns -EINVAL.
 */

static int
memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;

	/* Check if the pfn is from the current zone */
	zone_bm = bm->cur.zone_bm;
	if (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
		zone_bm = bm->zone_bm_list;
		/* We don't assume that the zones are sorted by pfns */
		while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
			zone_bm = zone_bm->next;
			if (unlikely(!zone_bm))
				return -EINVAL;
		}
		bm->cur.zone_bm = zone_bm;
	}
	/* Check if the pfn corresponds to the current bitmap block */
	bb = zone_bm->cur_block;
	if (pfn < bb->start_pfn)
		bb = zone_bm->bm_blocks;

	while (pfn >= bb->end_pfn) {
		bb = bb->next;
		if (unlikely(!bb))
			return -EINVAL;
	}
	zone_bm->cur_block = bb;
	pfn -= bb->start_pfn;
	set_bit(pfn % BM_BITS_PER_CHUNK, bb->data + pfn / BM_BITS_PER_CHUNK);
	return 0;
}
/* Two auxiliary functions for memory_bm_next_pfn */

/* Find the first set bit in the given chunk, if there is one */

static inline int next_bit_in_chunk(int bit, unsigned long *chunk_p)
{
	bit++;
	while (bit < BM_BITS_PER_CHUNK) {
		if (test_bit(bit, chunk_p))
			return bit;

		bit++;
	}
	return -1;
}
/* Find a chunk containing some bits set in given block of bits */

static inline int next_chunk_in_block(int n, struct bm_block *bb)
{
	n++;
	while (n < bb->size) {
		if (bb->data[n])
			return n;

		n++;
	}
	return -1;
}
/**
 *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
 *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
 *	returned.
 *
 *	It is required to run memory_bm_position_reset() before the first call to
 *	this function.
 */

static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
	struct zone_bitmap *zone_bm;
	struct bm_block *bb;
	int chunk;
	int bit;

	do {
		bb = bm->cur.block;
		do {
			chunk = bm->cur.chunk;
			bit = bm->cur.bit;
			do {
				bit = next_bit_in_chunk(bit, bb->data + chunk);
				if (bit >= 0)
					goto Return_pfn;

				chunk = next_chunk_in_block(chunk, bb);
				bit = -1;
			} while (chunk >= 0);
			bb = bb->next;
			bm->cur.block = bb;
			memory_bm_reset_chunk(bm);
		} while (bb);
		zone_bm = bm->cur.zone_bm->next;
		if (zone_bm) {
			bm->cur.zone_bm = zone_bm;
			bm->cur.block = zone_bm->bm_blocks;
			memory_bm_reset_chunk(bm);
		}
	} while (zone_bm);
	memory_bm_position_reset(bm);
	return BM_END_OF_MAP;

 Return_pfn:
	bm->cur.chunk = chunk;
	bm->cur.bit = bit;
	return bb->start_pfn + chunk * BM_BITS_PER_CHUNK + bit;
}
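
/*
 * Illustrative sketch (not part of the build): the canonical way to walk
 * all bits set in a bitmap, as done by duplicate_memory_bitmap() and
 * copy_data_pages() below.
 */
#if 0
	unsigned long pfn;

	memory_bm_position_reset(bm);
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn(bm)) {
		/* process pfn here */
	}
#endif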
/**
 *	snapshot_additional_pages - estimate the number of additional pages
 *	needed for setting up the suspend image data structures for given
 *	zone (usually the returned value is greater than the exact number)
 */

unsigned int snapshot_additional_pages(struct zone *zone)
{
	unsigned int res;

	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
	res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
	return 2 * res;
}
#ifdef CONFIG_HIGHMEM
/**
 *	count_free_highmem_pages - compute the total number of free highmem
 *	pages, system-wide.
 */

static unsigned int count_free_highmem_pages(void)
{
	struct zone *zone;
	unsigned int cnt = 0;

	for_each_zone(zone)
		if (populated_zone(zone) && is_highmem(zone))
			cnt += zone->free_pages;

	return cnt;
}
/**
 *	saveable_highmem_page - Determine whether a highmem page should be
 *	included in the suspend image.
 *
 *	We should save the page if it isn't Nosave or NosaveFree, or Reserved,
 *	and it isn't a part of a free chunk of pages.
 */

static struct page *saveable_highmem_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(!PageHighMem(page));

	if (PageNosave(page) || PageReserved(page) || PageNosaveFree(page))
		return NULL;

	return page;
}
/**
 *	count_highmem_pages - compute the total number of saveable highmem
 *	pages.
 */

unsigned int count_highmem_pages(void)
{
	struct zone *zone;
	unsigned int n = 0;

	for_each_zone(zone) {
		unsigned long pfn, max_zone_pfn;

		if (!is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_highmem_page(pfn))
				n++;
	}
	return n;
}
#else
static inline void *saveable_highmem_page(unsigned long pfn) { return NULL; }
static inline unsigned int count_highmem_pages(void) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

static inline int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}
/**
 *	saveable - Determine whether a non-highmem page should be included in
 *	the suspend image.
 *
 *	We should save the page if it isn't Nosave, and is not in the range
 *	of pages statically defined as 'unsaveable', and it isn't a part of
 *	a free chunk of pages.
 */

static struct page *saveable_page(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;

	page = pfn_to_page(pfn);

	BUG_ON(PageHighMem(page));

	if (PageNosave(page) || PageNosaveFree(page))
		return NULL;

	if (PageReserved(page) && pfn_is_nosave(pfn))
		return NULL;

	return page;
}
/**
 *	count_data_pages - compute the total number of saveable non-highmem
 *	pages.
 */

unsigned int count_data_pages(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	unsigned int n = 0;

	for_each_zone(zone) {
		if (is_highmem(zone))
			continue;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (saveable_page(pfn))
				n++;
	}
	return n;
}
/* This is needed, because copy_page and memcpy are not usable for copying
 * task structs.
 */
static inline void do_copy_page(long *dst, long *src)
{
	int n;

	for (n = PAGE_SIZE / sizeof(long); n; n--)
		*dst++ = *src++;
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
	return is_highmem(zone) ?
			saveable_highmem_page(pfn) : saveable_page(pfn);
}
static void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	struct page *s_page, *d_page;
	void *src, *dst;

	s_page = pfn_to_page(src_pfn);
	d_page = pfn_to_page(dst_pfn);
	if (PageHighMem(s_page)) {
		src = kmap_atomic(s_page, KM_USER0);
		dst = kmap_atomic(d_page, KM_USER1);
		do_copy_page(dst, src);
		kunmap_atomic(src, KM_USER0);
		kunmap_atomic(dst, KM_USER1);
	} else {
		src = page_address(s_page);
		if (PageHighMem(d_page)) {
			/* Page pointed to by src may contain some kernel
			 * data modified by kmap_atomic()
			 */
			do_copy_page(buffer, src);
			dst = kmap_atomic(pfn_to_page(dst_pfn), KM_USER0);
			memcpy(dst, buffer, PAGE_SIZE);
			kunmap_atomic(dst, KM_USER0);
		} else {
			dst = page_address(d_page);
			do_copy_page(dst, src);
		}
	}
}
#else
#define page_is_saveable(zone, pfn)	saveable_page(pfn)

static inline void
copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
	do_copy_page(page_address(pfn_to_page(dst_pfn)),
			page_address(pfn_to_page(src_pfn)));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
	struct zone *zone;
	unsigned long pfn;

	for_each_zone(zone) {
		unsigned long max_zone_pfn;

		mark_free_pages(zone);
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (page_is_saveable(zone, pfn))
				memory_bm_set_bit(orig_bm, pfn);
	}
	memory_bm_position_reset(orig_bm);
	memory_bm_position_reset(copy_bm);
	do {
		pfn = memory_bm_next_pfn(orig_bm);
		if (likely(pfn != BM_END_OF_MAP))
			copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
	} while (pfn != BM_END_OF_MAP);
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/**
 *	swsusp_free - free pages allocated for the suspend.
 *
 *	Suspend pages are allocated before the atomic copy is made, so we
 *	need to release them after the resume.
 */

void swsusp_free(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				if (PageNosave(page) && PageNosaveFree(page)) {
					ClearPageNosave(page);
					ClearPageNosaveFree(page);
					free_page((unsigned long)page_address(page));
				}
			}
	}
	nr_copy_pages = 0;
	nr_meta_pages = 0;
	restore_pblist = NULL;
	buffer = NULL;
}
#ifdef CONFIG_HIGHMEM
/**
 *	count_pages_for_highmem - compute the number of non-highmem pages
 *	that will be necessary for creating copies of highmem pages.
 */

static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
	unsigned int free_highmem = count_free_highmem_pages();

	if (free_highmem >= nr_highmem)
		nr_highmem = 0;
	else
		nr_highmem -= free_highmem;

	return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 *	enough_free_mem - Make sure we have enough free memory for the
 *	snapshot image.
 */

static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
	struct zone *zone;
	unsigned int free = 0, meta = 0;

	for_each_zone(zone) {
		meta += snapshot_additional_pages(zone);
		if (!is_highmem(zone))
			free += zone->free_pages;
	}

	nr_pages += count_pages_for_highmem(nr_highmem);
	pr_debug("swsusp: Normal pages needed: %u + %u + %u, available pages: %u\n",
		nr_pages, PAGES_FOR_IO, meta, free);

	return free > nr_pages + PAGES_FOR_IO + meta;
}
#ifdef CONFIG_HIGHMEM
/**
 *	get_highmem_buffer - if there are some highmem pages in the suspend
 *	image, we may need the buffer to copy them and/or load their data.
 */

static inline int get_highmem_buffer(int safe_needed)
{
	buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
	return buffer ? 0 : -ENOMEM;
}
/**
 *	alloc_highmem_image_pages - allocate some highmem pages for the image.
 *	Try to allocate as many pages as needed, but if the number of free
 *	highmem pages is less than that, allocate them all.
 */

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
	unsigned int to_alloc = count_free_highmem_pages();

	if (to_alloc > nr_highmem)
		to_alloc = nr_highmem;

	nr_highmem -= to_alloc;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_image_page(__GFP_HIGHMEM);
		memory_bm_set_bit(bm, page_to_pfn(page));
	}
	return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }

static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
 *	swsusp_alloc - allocate memory for the suspend image
 *
 *	We first try to allocate as many highmem pages as there are
 *	saveable highmem pages in the system.  If that fails, we allocate
 *	non-highmem pages for the copies of the remaining highmem ones.
 *
 *	In this approach it is likely that the copies of highmem pages will
 *	also be located in the high memory, because of the way in which
 *	copy_data_pages() works.
 */

static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
		unsigned int nr_pages, unsigned int nr_highmem)
{
	int error;

	error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
	if (error)
		goto Free;

	if (nr_highmem > 0) {
		error = get_highmem_buffer(PG_ANY);
		if (error)
			goto Free;

		nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
	}
	while (nr_pages-- > 0) {
		struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);

		if (!page)
			goto Free;

		memory_bm_set_bit(copy_bm, page_to_pfn(page));
	}
	return 0;

 Free:
	swsusp_free();
	return -ENOMEM;
}
/* Memory bitmap used for marking saveable pages (during suspend) or the
 * suspend image pages (during resume)
 */
static struct memory_bitmap orig_bm;
/* Memory bitmap used on suspend for marking allocated pages that will contain
 * the copies of saveable pages.  During resume it is initially used for
 * marking the suspend image pages, but then its set bits are duplicated in
 * @orig_bm and it is released.  Next, on systems with high memory, it may be
 * used for marking "safe" highmem pages, but it has to be reinitialized for
 * this purpose.
 */
static struct memory_bitmap copy_bm;
asmlinkage int swsusp_save(void)
{
	unsigned int nr_pages, nr_highmem;

	printk("swsusp: critical section: \n");

	drain_local_pages();
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
	printk("swsusp: Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
		printk(KERN_ERR "swsusp: Not enough free memory\n");
		return -ENOMEM;
	}

	if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
		printk(KERN_ERR "swsusp: Memory allocation failed\n");
		return -ENOMEM;
	}

	/* During allocating of suspend pagedir, new cold pages may appear.
	 * Kill them.
	 */
	drain_local_pages();
	copy_data_pages(&copy_bm, &orig_bm);

	/*
	 * End of critical section.  From now on, we can write to memory,
	 * but we should not touch disk.  This specially means we must _not_
	 * touch swap space!  Except we must write out our image of course.
	 */

	nr_pages += nr_highmem;
	nr_copy_pages = nr_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

	printk("swsusp: critical section: done (%d pages copied)\n", nr_pages);

	return 0;
}
static void init_header(struct swsusp_info *info)
{
	memset(info, 0, sizeof(struct swsusp_info));
	info->version_code = LINUX_VERSION_CODE;
	info->num_physpages = num_physpages;
	memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
	info->cpus = num_online_cpus();
	info->image_pages = nr_copy_pages;
	info->pages = nr_copy_pages + nr_meta_pages + 1;
	info->size = info->pages;
	info->size <<= PAGE_SHIFT;
}
/**
 *	pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
 *	are stored in the array @buf[] (1 page at a time)
 */

static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		buf[j] = memory_bm_next_pfn(bm);
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;
	}
}
/**
 *	snapshot_read_next - used for reading the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to read from the snapshot.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to read up to the returned number of bytes from the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the read would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the end of data stream condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_read_next(struct snapshot_handle *handle, size_t count)
{
	if (handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (!buffer) {
		/* This makes the buffer be freed by swsusp_free() */
		buffer = get_image_page(GFP_ATOMIC, PG_ANY);
		if (!buffer)
			return -ENOMEM;
	}
	if (!handle->offset) {
		init_header((struct swsusp_info *)buffer);
		handle->buffer = buffer;
		memory_bm_position_reset(&orig_bm);
		memory_bm_position_reset(&copy_bm);
	}
	if (handle->prev < handle->cur) {
		if (handle->cur <= nr_meta_pages) {
			memset(buffer, 0, PAGE_SIZE);
			pack_pfns(buffer, &orig_bm);
		} else {
			struct page *page;

			page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
			if (PageHighMem(page)) {
				/* Highmem pages are copied to the buffer,
				 * because we can't return with a kmapped
				 * highmem page (we may not be called again).
				 */
				void *kaddr;

				kaddr = kmap_atomic(page, KM_USER0);
				memcpy(buffer, kaddr, PAGE_SIZE);
				kunmap_atomic(kaddr, KM_USER0);
				handle->buffer = buffer;
			} else {
				handle->buffer = page_address(page);
			}
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
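
/*
 * Illustrative sketch (not part of the build): how a caller is expected to
 * drive snapshot_read_next(), reading the image one page at a time through
 * the data_of() macro from power.h.
 */
#if 0
	struct snapshot_handle handle;
	int ret;

	memset(&handle, 0, sizeof(handle));
	while ((ret = snapshot_read_next(&handle, PAGE_SIZE)) > 0) {
		/* write ret bytes from data_of(handle) to the image */
	}
	/* ret == 0 means the end of the data stream, ret < 0 an error */
#endif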
/**
 *	mark_unsafe_pages - mark the pages that cannot be used for storing
 *	the image during resume, because they conflict with the pages that
 *	had been used before suspend
 */

static int mark_unsafe_pages(struct memory_bitmap *bm)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	/* Clear page flags */
	for_each_zone(zone) {
		max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
			if (pfn_valid(pfn))
				ClearPageNosaveFree(pfn_to_page(pfn));
	}

	/* Mark pages that correspond to the "original" pfns as "unsafe" */
	memory_bm_position_reset(bm);
	do {
		pfn = memory_bm_next_pfn(bm);
		if (likely(pfn != BM_END_OF_MAP)) {
			if (likely(pfn_valid(pfn)))
				SetPageNosaveFree(pfn_to_page(pfn));
			else
				return -EFAULT;
		}
	} while (pfn != BM_END_OF_MAP);

	allocated_unsafe_pages = 0;

	return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
	unsigned long pfn;

	memory_bm_position_reset(src);
	pfn = memory_bm_next_pfn(src);
	while (pfn != BM_END_OF_MAP) {
		memory_bm_set_bit(dst, pfn);
		pfn = memory_bm_next_pfn(src);
	}
}
static inline int check_header(struct swsusp_info *info)
{
	char *reason = NULL;

	if (info->version_code != LINUX_VERSION_CODE)
		reason = "kernel version";
	if (info->num_physpages != num_physpages)
		reason = "memory size";
	if (strcmp(info->uts.sysname, init_utsname()->sysname))
		reason = "system type";
	if (strcmp(info->uts.release, init_utsname()->release))
		reason = "kernel release";
	if (strcmp(info->uts.version, init_utsname()->version))
		reason = "version";
	if (strcmp(info->uts.machine, init_utsname()->machine))
		reason = "machine";
	if (reason) {
		printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
		return -EPERM;
	}
	return 0;
}
/**
 *	load_header - check the image header and copy data from it
 */

static int
load_header(struct swsusp_info *info)
{
	int error;

	restore_pblist = NULL;
	error = check_header(info);
	if (!error) {
		nr_copy_pages = info->image_pages;
		nr_meta_pages = info->pages - info->image_pages - 1;
	}
	return error;
}
/**
 *	unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
 *	the corresponding bit in the memory bitmap @bm
 */

static inline void
unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
	int j;

	for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
		if (unlikely(buf[j] == BM_END_OF_MAP))
			break;

		memory_bm_set_bit(bm, buf[j]);
	}
}
/* List of "safe" pages that may be used to store data loaded from the suspend
 * image
 */
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
 * should be restored atomically during the resume from disk, because the page
 * frames they have occupied before the suspend are in use.
 */
struct highmem_pbe {
	struct page *copy_page;	/* data is here now */
	struct page *orig_page;	/* data was here before the suspend */
	struct highmem_pbe *next;
};

/* List of highmem PBEs needed for restoring the highmem pages that were
 * allocated before the suspend and included in the suspend image, but have
 * also been allocated by the "resume" kernel, so their contents cannot be
 * written directly to their "original" page frames.
 */
static struct highmem_pbe *highmem_pblist;
/**
 *	count_highmem_image_pages - compute the number of highmem pages in the
 *	suspend image.  The bits in the memory bitmap @bm that correspond to the
 *	image pages are assumed to be set.
 */

static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
	unsigned long pfn;
	unsigned int cnt = 0;

	memory_bm_position_reset(bm);
	pfn = memory_bm_next_pfn(bm);
	while (pfn != BM_END_OF_MAP) {
		if (PageHighMem(pfn_to_page(pfn)))
			cnt++;

		pfn = memory_bm_next_pfn(bm);
	}
	return cnt;
}
/**
 *	prepare_highmem_image - try to allocate as many highmem pages as
 *	there are highmem image pages (@nr_highmem_p points to the variable
 *	containing the number of highmem image pages).  The pages that are
 *	"safe" (ie. will not be overwritten when the suspend image is
 *	restored) have the corresponding bits set in @bm (it must be
 *	uninitialized).
 *
 *	NOTE: This function should not be called if there are no highmem
 *	image pages.
 */

static unsigned int safe_highmem_pages;

static struct memory_bitmap *safe_highmem_bm;

static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	unsigned int to_alloc;

	if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
		return -ENOMEM;

	if (get_highmem_buffer(PG_SAFE))
		return -ENOMEM;

	to_alloc = count_free_highmem_pages();
	if (to_alloc > *nr_highmem_p)
		to_alloc = *nr_highmem_p;
	else
		*nr_highmem_p = to_alloc;

	safe_highmem_pages = 0;
	while (to_alloc-- > 0) {
		struct page *page;

		page = alloc_page(__GFP_HIGHMEM);
		if (!PageNosaveFree(page)) {
			/* The page is "safe", set its bit in the bitmap */
			memory_bm_set_bit(bm, page_to_pfn(page));
			safe_highmem_pages++;
		}
		/* Mark the page as allocated */
		SetPageNosave(page);
		SetPageNosaveFree(page);
	}
	memory_bm_position_reset(bm);
	safe_highmem_bm = bm;
	return 0;
}
/**
 *	get_highmem_page_buffer - for given highmem image page find the buffer
 *	that suspend_write_next() should set for its caller to write to.
 *
 *	If the page is to be saved to its "original" page frame or a copy of
 *	the page is to be made in the highmem, @buffer is returned.  Otherwise,
 *	the copy of the page is to be made in normal memory, so the address of
 *	the copy is returned.
 *
 *	If @buffer is returned, the caller of suspend_write_next() will write
 *	the page's contents to @buffer, so they will have to be copied to the
 *	right location on the next call to suspend_write_next() and it is done
 *	with the help of copy_last_highmem_page().  For this purpose, if
 *	@buffer is returned, @last_highmem_page is set to the page to which
 *	the data will have to be copied from @buffer.
 */

static struct page *last_highmem_page;

static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	struct highmem_pbe *pbe;
	void *kaddr;

	if (PageNosave(page) && PageNosaveFree(page)) {
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		last_highmem_page = page;
		return buffer;
	}
	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_page = page;
	if (safe_highmem_pages > 0) {
		struct page *tmp;

		/* Copy of the page will be stored in high memory */
		kaddr = buffer;
		tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
		safe_highmem_pages--;
		last_highmem_page = tmp;
		pbe->copy_page = tmp;
	} else {
		/* Copy of the page will be stored in normal memory */
		kaddr = safe_pages_list;
		safe_pages_list = safe_pages_list->next;
		pbe->copy_page = virt_to_page(kaddr);
	}
	pbe->next = highmem_pblist;
	highmem_pblist = pbe;
	return kaddr;
}
/**
 *	copy_last_highmem_page - copy the contents of a highmem image from
 *	@buffer, where the caller of snapshot_write_next() has placed them,
 *	to the right location represented by @last_highmem_page .
 */

static void copy_last_highmem_page(void)
{
	if (last_highmem_page) {
		void *dst;

		dst = kmap_atomic(last_highmem_page, KM_USER0);
		memcpy(dst, buffer, PAGE_SIZE);
		kunmap_atomic(dst, KM_USER0);
		last_highmem_page = NULL;
	}
}
static inline int last_highmem_page_copied(void)
{
	return !last_highmem_page;
}

static inline void free_highmem_data(void)
{
	if (safe_highmem_bm)
		memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);

	if (buffer)
		free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static inline int get_safe_write_buffer(void) { return 0; }

static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }

static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
	return 0;
}

static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
	return NULL;
}

static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
 *	prepare_image - use the memory bitmap @bm to mark the pages that will
 *	be overwritten in the process of restoring the system memory state
 *	from the suspend image ("unsafe" pages) and allocate memory for the
 *	image.
 *
 *	The idea is to allocate a new memory bitmap first and then allocate
 *	as many pages as needed for the image data, but not to assign these
 *	pages to specific tasks initially.  Instead, we just mark them as
 *	allocated and create a list of "safe" pages that will be used
 *	later.  On systems with high memory a list of "safe" highmem pages is
 *	also created.
 */

#define PBES_PER_LINKED_PAGE	(LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
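
/* Worked example (illustrative, assuming 4 KB pages and 64-bit pointers):
 * LINKED_PAGE_DATA_SIZE is 4096 - 8 = 4088 bytes and struct pbe holds
 * three pointer-sized members (24 bytes), so PBES_PER_LINKED_PAGE is
 * 4088 / 24 = 170 PBEs per linked page.
 */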
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
	unsigned int nr_pages, nr_highmem;
	struct linked_page *sp_list, *lp;
	int error;

	/* If there is no highmem, the buffer will not be necessary */
	free_image_page(buffer, PG_UNSAFE_CLEAR);
	buffer = NULL;

	nr_highmem = count_highmem_image_pages(bm);
	error = mark_unsafe_pages(bm);
	if (error)
		goto Free;

	error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
	if (error)
		goto Free;

	duplicate_memory_bitmap(new_bm, bm);
	memory_bm_free(bm, PG_UNSAFE_KEEP);
	if (nr_highmem > 0) {
		error = prepare_highmem_image(bm, &nr_highmem);
		if (error)
			goto Free;
	}
	/* Reserve some safe pages for potential later use.
	 *
	 * NOTE: This way we make sure there will be enough safe pages for the
	 * chain_alloc() in get_buffer().  It is a bit wasteful, but
	 * nr_copy_pages cannot be greater than 50% of the memory anyway.
	 */
	sp_list = NULL;
	/* nr_copy_pages cannot be less than allocated_unsafe_pages */
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
	while (nr_pages > 0) {
		lp = get_image_page(GFP_ATOMIC, PG_SAFE);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		lp->next = sp_list;
		sp_list = lp;
		nr_pages--;
	}
	/* Preallocate memory for the image */
	safe_pages_list = NULL;
	nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
	while (nr_pages > 0) {
		lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
		if (!lp) {
			error = -ENOMEM;
			goto Free;
		}
		if (!PageNosaveFree(virt_to_page(lp))) {
			/* The page is "safe", add it to the list */
			lp->next = safe_pages_list;
			safe_pages_list = lp;
		}
		/* Mark the page as allocated */
		SetPageNosave(virt_to_page(lp));
		SetPageNosaveFree(virt_to_page(lp));
		nr_pages--;
	}
	/* Free the reserved safe pages so that chain_alloc() can use them */
	while (sp_list) {
		lp = sp_list->next;
		free_image_page(sp_list, PG_UNSAFE_CLEAR);
		sp_list = lp;
	}
	return 0;

 Free:
	swsusp_free();
	return error;
}
/**
 *	get_buffer - compute the address that snapshot_write_next() should
 *	set for its caller to write to.
 */

static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
	struct pbe *pbe;
	struct page *page = pfn_to_page(memory_bm_next_pfn(bm));

	if (PageHighMem(page))
		return get_highmem_page_buffer(page, ca);

	if (PageNosave(page) && PageNosaveFree(page))
		/* We have allocated the "original" page frame and we can
		 * use it directly to store the loaded page.
		 */
		return page_address(page);

	/* The "original" page frame has not been allocated and we have to
	 * use a "safe" page frame to store the loaded page.
	 */
	pbe = chain_alloc(ca, sizeof(struct pbe));
	if (!pbe) {
		swsusp_free();
		return NULL;
	}
	pbe->orig_address = page_address(page);
	pbe->address = safe_pages_list;
	safe_pages_list = safe_pages_list->next;
	pbe->next = restore_pblist;
	restore_pblist = pbe;
	return pbe->address;
}
/**
 *	snapshot_write_next - used for writing the system memory snapshot.
 *
 *	On the first call to it @handle should point to a zeroed
 *	snapshot_handle structure.  The structure gets updated and a pointer
 *	to it should be passed to this function every next time.
 *
 *	The @count parameter should contain the number of bytes the caller
 *	wants to write to the image.  It must not be zero.
 *
 *	On success the function returns a positive number.  Then, the caller
 *	is allowed to write up to the returned number of bytes to the memory
 *	location computed by the data_of() macro.  The number returned
 *	may be smaller than @count, but this only happens if the write would
 *	cross a page boundary otherwise.
 *
 *	The function returns 0 to indicate the "end of file" condition,
 *	and a negative number is returned on error.  In such cases the
 *	structure pointed to by @handle is not updated and should not be used
 *	any more.
 */

int snapshot_write_next(struct snapshot_handle *handle, size_t count)
{
	static struct chain_allocator ca;
	int error = 0;

	/* Check if we have already loaded the entire image */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages)
		return 0;

	if (handle->offset == 0) {
		if (!buffer)
			/* This makes the buffer be freed by swsusp_free() */
			buffer = get_image_page(GFP_ATOMIC, PG_ANY);

		if (!buffer)
			return -ENOMEM;

		handle->buffer = buffer;
	}
	handle->sync_read = 1;
	if (handle->prev < handle->cur) {
		if (handle->prev == 0) {
			error = load_header(buffer);
			if (error)
				return error;

			error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
			if (error)
				return error;

		} else if (handle->prev <= nr_meta_pages) {
			unpack_orig_pfns(buffer, &copy_bm);
			if (handle->prev == nr_meta_pages) {
				error = prepare_image(&orig_bm, &copy_bm);
				if (error)
					return error;

				chain_init(&ca, GFP_ATOMIC, PG_SAFE);
				memory_bm_position_reset(&orig_bm);
				restore_pblist = NULL;
				handle->buffer = get_buffer(&orig_bm, &ca);
				handle->sync_read = 0;
				if (!handle->buffer)
					return -ENOMEM;
			}
		} else {
			copy_last_highmem_page();
			handle->buffer = get_buffer(&orig_bm, &ca);
			if (handle->buffer != buffer)
				handle->sync_read = 0;
		}
		handle->prev = handle->cur;
	}
	handle->buf_offset = handle->cur_offset;
	if (handle->cur_offset + count >= PAGE_SIZE) {
		count = PAGE_SIZE - handle->cur_offset;
		handle->cur_offset = 0;
		handle->cur++;
	} else {
		handle->cur_offset += count;
	}
	handle->offset += count;
	return count;
}
/**
 *	snapshot_write_finalize - must be called after the last call to
 *	snapshot_write_next() in case the last page in the image happens
 *	to be a highmem page and its contents should be stored in the
 *	highmem.  Additionally, it releases the memory that will not be
 *	used any more.
 */

void snapshot_write_finalize(struct snapshot_handle *handle)
{
	copy_last_highmem_page();
	/* Free only if we have loaded the image entirely */
	if (handle->prev && handle->cur > nr_meta_pages + nr_copy_pages) {
		memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
		free_highmem_data();
	}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
	return !(!nr_copy_pages || !last_highmem_page_copied() ||
			handle->cur <= nr_meta_pages + nr_copy_pages);
}
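
/*
 * Illustrative sketch (not part of the build): how the image-loading path
 * is expected to feed data into snapshot_write_next() and finish the
 * restore setup.
 */
#if 0
	struct snapshot_handle handle;
	int ret;

	memset(&handle, 0, sizeof(handle));
	while ((ret = snapshot_write_next(&handle, PAGE_SIZE)) > 0) {
		/* copy ret bytes of image data to data_of(handle) */
	}
	snapshot_write_finalize(&handle);
	if (ret < 0 || !snapshot_image_loaded(&handle)) {
		/* the loaded image is not usable */
	}
#endif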
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
	void *kaddr1, *kaddr2;

	kaddr1 = kmap_atomic(p1, KM_USER0);
	kaddr2 = kmap_atomic(p2, KM_USER1);
	memcpy(buf, kaddr1, PAGE_SIZE);
	memcpy(kaddr1, kaddr2, PAGE_SIZE);
	memcpy(kaddr2, buf, PAGE_SIZE);
	kunmap_atomic(kaddr1, KM_USER0);
	kunmap_atomic(kaddr2, KM_USER1);
}
/**
 *	restore_highmem - for each highmem page that was allocated before
 *	the suspend and included in the suspend image, and also has been
 *	allocated by the "resume" kernel, swap its current (ie. "before
 *	resume") contents with the previous (ie. "before suspend") one.
 *
 *	If the resume eventually fails, we can call this function once
 *	again and restore the "before resume" highmem state.
 */

int restore_highmem(void)
{
	struct highmem_pbe *pbe = highmem_pblist;
	void *buf;

	if (!pbe)
		return 0;

	buf = get_image_page(GFP_ATOMIC, PG_SAFE);
	if (!buf)
		return -ENOMEM;

	while (pbe) {
		swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
		pbe = pbe->next;
	}
	free_image_page(buf, PG_UNSAFE_CLEAR);
	return 0;
}
#endif /* CONFIG_HIGHMEM */