1/*
2 * linux/kernel/power/swap.c
3 *
4 * This file provides functions for reading the suspend image from
5 * and writing it to a swap partition.
6 *
a2531293 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
61159a31 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
5a21d489 9 * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
61159a31
RW
10 *
11 * This file is released under the GPLv2.
12 *
13 */
14
15#include <linux/module.h>
61159a31 16#include <linux/file.h>
61159a31
RW
17#include <linux/delay.h>
18#include <linux/bitops.h>
19#include <linux/genhd.h>
20#include <linux/device.h>
61159a31 21#include <linux/bio.h>
546e0d27 22#include <linux/blkdev.h>
61159a31
RW
23#include <linux/swap.h>
24#include <linux/swapops.h>
25#include <linux/pm.h>
5a0e3ad6 26#include <linux/slab.h>
f996fc96
BS
27#include <linux/lzo.h>
28#include <linux/vmalloc.h>
081a9d04
BS
29#include <linux/cpumask.h>
30#include <linux/atomic.h>
31#include <linux/kthread.h>
32#include <linux/crc32.h>
db597605 33#include <linux/ktime.h>
61159a31
RW
34
35#include "power.h"
36
be8cd644 37#define HIBERNATE_SIG "S1SUSPEND"
61159a31 38
 39/*
 40 * The swap map is a data structure used for keeping track of each page
 41 * written to a swap partition.  It consists of many swap_map_page
 42 * structures, each of which contains an array of MAP_PAGE_ENTRIES swap
 43 * entries.  These structures are stored in swap and linked together with
 44 * the help of the .next_swap member.
 45 *
 46 * The swap map is created during suspend.  The swap map pages are
 47 * allocated and populated one at a time, so we only need one memory
 48 * page to set up the entire structure.
 49 *
 50 * During resume we read all swap_map_page structures into a list.
 51 */
52
53#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
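/*
 * For illustration: with 4 KiB pages and a 64-bit sector_t this works out
 * to 4096 / 8 - 1 = 511 entries, so a single swap_map_page indexes 511
 * image pages (roughly 2 MiB of data) and its final slot (.next_swap)
 * chains to the next map page.
 */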
54
f8262d47
BS
55/*
56 * Number of free pages that are not high.
57 */
58static inline unsigned long low_free_pages(void)
59{
60 return nr_free_pages() - nr_free_highpages();
61}
62
63/*
64 * Number of pages required to be kept free while writing the image. Always
65 * half of all available low pages before the writing starts.
66 */
67static inline unsigned long reqd_free_pages(void)
68{
69 return low_free_pages() / 2;
70}
71
51fb352b
JS
72struct swap_map_page {
73 sector_t entries[MAP_PAGE_ENTRIES];
74 sector_t next_swap;
75};
76
081a9d04
BS
77struct swap_map_page_list {
78 struct swap_map_page *map;
79 struct swap_map_page_list *next;
80};
81
 82/**
 83 * The swap_map_handle structure is used for handling swap in
 84 * a file-like way.
 85 */
86
87struct swap_map_handle {
88 struct swap_map_page *cur;
081a9d04 89 struct swap_map_page_list *maps;
51fb352b
JS
90 sector_t cur_swap;
91 sector_t first_sector;
92 unsigned int k;
f8262d47 93 unsigned long reqd_free_pages;
081a9d04 94 u32 crc32;
51fb352b
JS
95};
96
1b29c164 97struct swsusp_header {
081a9d04
BS
98 char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
99 sizeof(u32)];
100 u32 crc32;
3aef83e0 101 sector_t image;
a634cc10 102 unsigned int flags; /* Flags to pass to the "boot" kernel */
61159a31
RW
103 char orig_sig[10];
104 char sig[10];
52f5684c 105} __packed;
1b29c164
VG
106
107static struct swsusp_header *swsusp_header;
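/*
 * Illustrative layout note: the reserved[] padding is sized so that the
 * structure spans exactly one page.  With 4 KiB pages and a 64-bit
 * sector_t that is 4096 - 20 - 8 - 4 - 4 = 4060 bytes of padding, leaving
 * sig[] as the last 10 bytes of the page -- the same bytes where a swap
 * partition keeps its "SWAP-SPACE"/"SWAPSPACE2" magic.  That overlap is
 * what lets mark_swapfiles() stash the original magic in orig_sig[] and
 * replace it with HIBERNATE_SIG, and lets swsusp_check()/swsusp_unmark()
 * restore it afterwards.
 */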
61159a31 108
109/**
110 * The following functions are used for tracking the allocated
111 * swap pages, so that they can be freed in case of an error.
112 */
113
114struct swsusp_extent {
115 struct rb_node node;
116 unsigned long start;
117 unsigned long end;
118};
119
120static struct rb_root swsusp_extents = RB_ROOT;
121
122static int swsusp_extents_insert(unsigned long swap_offset)
123{
124 struct rb_node **new = &(swsusp_extents.rb_node);
125 struct rb_node *parent = NULL;
126 struct swsusp_extent *ext;
127
128 /* Figure out where to put the new node */
129 while (*new) {
8316bd72 130 ext = rb_entry(*new, struct swsusp_extent, node);
0414f2ec
NC
131 parent = *new;
132 if (swap_offset < ext->start) {
133 /* Try to merge */
134 if (swap_offset == ext->start - 1) {
135 ext->start--;
136 return 0;
137 }
138 new = &((*new)->rb_left);
139 } else if (swap_offset > ext->end) {
140 /* Try to merge */
141 if (swap_offset == ext->end + 1) {
142 ext->end++;
143 return 0;
144 }
145 new = &((*new)->rb_right);
146 } else {
147 /* It already is in the tree */
148 return -EINVAL;
149 }
150 }
151 /* Add the new node and rebalance the tree. */
152 ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
153 if (!ext)
154 return -ENOMEM;
155
156 ext->start = swap_offset;
157 ext->end = swap_offset;
158 rb_link_node(&ext->node, parent, new);
159 rb_insert_color(&ext->node, &swsusp_extents);
160 return 0;
161}
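/*
 * For example, allocating swap offsets 10, 11, 12 and 14 in that order
 * leaves two extents in the tree, [10..12] and [14..14]: a consecutive
 * offset is merged into the adjacent extent rather than getting a node of
 * its own.  free_all_swap_pages() later walks every extent and calls
 * swap_free() on each offset it covers.
 */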
162
163/**
164 * alloc_swapdev_block - allocate a swap page and register that it has
165 * been allocated, so that it can be freed in case of an error.
166 */
167
168sector_t alloc_swapdev_block(int swap)
169{
170 unsigned long offset;
171
910321ea 172 offset = swp_offset(get_swap_page_of_type(swap));
0414f2ec
NC
173 if (offset) {
174 if (swsusp_extents_insert(offset))
910321ea 175 swap_free(swp_entry(swap, offset));
0414f2ec
NC
176 else
177 return swapdev_block(swap, offset);
178 }
179 return 0;
180}
181
182/**
183 * free_all_swap_pages - free swap pages allocated for saving image data.
90133673 184 * It also frees the extents used to register which swap entries had been
0414f2ec
NC
185 * allocated.
186 */
187
188void free_all_swap_pages(int swap)
189{
190 struct rb_node *node;
191
192 while ((node = swsusp_extents.rb_node)) {
193 struct swsusp_extent *ext;
194 unsigned long offset;
195
196 ext = container_of(node, struct swsusp_extent, node);
197 rb_erase(node, &swsusp_extents);
198 for (offset = ext->start; offset <= ext->end; offset++)
910321ea 199 swap_free(swp_entry(swap, offset));
0414f2ec
NC
200
201 kfree(ext);
202 }
203}
204
205int swsusp_swap_in_use(void)
206{
207 return (swsusp_extents.rb_node != NULL);
208}
209
61159a31 210/*
3fc6b34f 211 * General things
61159a31
RW
212 */
213
214static unsigned short root_swap = 0xffff;
343df3c7
CH
215static struct block_device *hib_resume_bdev;
216
217struct hib_bio_batch {
218 atomic_t count;
219 wait_queue_head_t wait;
220 int error;
221};
222
223static void hib_init_batch(struct hib_bio_batch *hb)
224{
225 atomic_set(&hb->count, 0);
226 init_waitqueue_head(&hb->wait);
227 hb->error = 0;
228}
229
230static void hib_end_io(struct bio *bio, int error)
231{
232 struct hib_bio_batch *hb = bio->bi_private;
233 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
234 struct page *page = bio->bi_io_vec[0].bv_page;
235
236 if (!uptodate || error) {
237 printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
238 imajor(bio->bi_bdev->bd_inode),
239 iminor(bio->bi_bdev->bd_inode),
240 (unsigned long long)bio->bi_iter.bi_sector);
241
242 if (!error)
243 error = -EIO;
244 }
245
246 if (bio_data_dir(bio) == WRITE)
247 put_page(page);
248
249 if (error && !hb->error)
250 hb->error = error;
251 if (atomic_dec_and_test(&hb->count))
252 wake_up(&hb->wait);
253
254 bio_put(bio);
255}
256
257static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
258 struct hib_bio_batch *hb)
259{
260 struct page *page = virt_to_page(addr);
261 struct bio *bio;
262 int error = 0;
263
264 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
265 bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
266 bio->bi_bdev = hib_resume_bdev;
267
268 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
269 printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
270 (unsigned long long)bio->bi_iter.bi_sector);
271 bio_put(bio);
272 return -EFAULT;
273 }
274
275 if (hb) {
276 bio->bi_end_io = hib_end_io;
277 bio->bi_private = hb;
278 atomic_inc(&hb->count);
279 submit_bio(rw, bio);
280 } else {
281 error = submit_bio_wait(rw, bio);
282 bio_put(bio);
283 }
284
285 return error;
286}
287
288static int hib_wait_io(struct hib_bio_batch *hb)
289{
290 wait_event(hb->wait, atomic_read(&hb->count) == 0);
291 return hb->error;
292}
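/*
 * Minimal usage sketch of the batch helpers above (the function name and
 * its parameters are illustrative only; save_image() and load_image()
 * below are the real users).  A batch is just a counter plus a wait
 * queue: a caller may queue any number of asynchronous page-sized I/Os
 * and then block once for all of them.  Each bufs[i] is assumed to be a
 * page-sized, low-memory buffer.
 */
static int __maybe_unused hib_batch_read_sketch(sector_t start, void **bufs,
						unsigned int nr)
{
	struct hib_bio_batch hb;
	unsigned int i;
	int error = 0;
	int err2;

	hib_init_batch(&hb);
	for (i = 0; i < nr && !error; i++)
		/* each submission bumps hb.count; hib_end_io() drops it */
		error = hib_submit_io(READ_SYNC, start + i, bufs[i], &hb);
	/* wait for every queued bio and pick up the first error seen */
	err2 = hib_wait_io(&hb);
	return error ? error : err2;
}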
3fc6b34f 293
3fc6b34f
RW
294/*
295 * Saving part
296 */
61159a31 297
51fb352b 298static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
61159a31
RW
299{
300 int error;
301
343df3c7 302 hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
1b29c164
VG
303 if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
304 !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
305 memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
3624eb04 306 memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
51fb352b 307 swsusp_header->image = handle->first_sector;
a634cc10 308 swsusp_header->flags = flags;
081a9d04
BS
309 if (flags & SF_CRC32_MODE)
310 swsusp_header->crc32 = handle->crc32;
343df3c7 311 error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
1b29c164 312 swsusp_header, NULL);
61159a31 313 } else {
23976728 314 printk(KERN_ERR "PM: Swap header not found!\n");
61159a31
RW
315 error = -ENODEV;
316 }
317 return error;
318}
319
320/**
321 * swsusp_swap_check - check if the resume device is a swap device
322 * and get its index (if so)
323 *
324 * This is called before saving the image.
325 */
6f612af5 326static int swsusp_swap_check(void)
61159a31 327{
3aef83e0
RW
328 int res;
329
7bf23687 330 res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
8a0d613f 331 &hib_resume_bdev);
3aef83e0
RW
332 if (res < 0)
333 return res;
334
335 root_swap = res;
e525fd89 336 res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
7bf23687
RW
337 if (res)
338 return res;
3aef83e0 339
8a0d613f 340 res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
3aef83e0 341 if (res < 0)
8a0d613f 342 blkdev_put(hib_resume_bdev, FMODE_WRITE);
61159a31 343
61159a31
RW
344 return res;
345}
346
347/**
348 * write_page - Write one page to given swap location.
349 * @buf: Address we're writing.
350 * @offset: Offset of the swap page we're writing to.
343df3c7 351 * @hb: bio completion batch
61159a31
RW
352 */
353
343df3c7 354static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
61159a31 355{
3aef83e0 356 void *src;
081a9d04 357 int ret;
3aef83e0
RW
358
359 if (!offset)
360 return -ENOSPC;
361
343df3c7 362 if (hb) {
5a21d489
BS
363 src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
364 __GFP_NORETRY);
3aef83e0 365 if (src) {
3ecb01df 366 copy_page(src, buf);
3aef83e0 367 } else {
343df3c7 368 ret = hib_wait_io(hb); /* Free pages */
081a9d04
BS
369 if (ret)
370 return ret;
5a21d489
BS
371 src = (void *)__get_free_page(__GFP_WAIT |
372 __GFP_NOWARN |
373 __GFP_NORETRY);
081a9d04
BS
374 if (src) {
375 copy_page(src, buf);
376 } else {
377 WARN_ON_ONCE(1);
343df3c7 378 hb = NULL; /* Go synchronous */
081a9d04
BS
379 src = buf;
380 }
ab954160 381 }
3aef83e0
RW
382 } else {
383 src = buf;
61159a31 384 }
343df3c7 385 return hib_submit_io(WRITE_SYNC, offset, src, hb);
61159a31
RW
386}
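/*
 * Note on the copying above: for asynchronous writes hib_end_io() drops a
 * reference on the bio's page (put_page()), which would free the caller's
 * buffer out from under it.  That is why write_page() hands the block
 * layer a throwaway copy allocated with __get_free_page() and only falls
 * back to writing 'buf' itself synchronously when no spare page can be
 * obtained.
 */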
387
61159a31
RW
388static void release_swap_writer(struct swap_map_handle *handle)
389{
390 if (handle->cur)
391 free_page((unsigned long)handle->cur);
392 handle->cur = NULL;
61159a31
RW
393}
394
395static int get_swap_writer(struct swap_map_handle *handle)
396{
6f612af5
JS
397 int ret;
398
399 ret = swsusp_swap_check();
400 if (ret) {
401 if (ret != -ENOSPC)
402 printk(KERN_ERR "PM: Cannot find swap device, try "
403 "swapon -a.\n");
404 return ret;
405 }
61159a31 406 handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
6f612af5
JS
407 if (!handle->cur) {
408 ret = -ENOMEM;
409 goto err_close;
410 }
d1d241cc 411 handle->cur_swap = alloc_swapdev_block(root_swap);
61159a31 412 if (!handle->cur_swap) {
6f612af5
JS
413 ret = -ENOSPC;
414 goto err_rel;
61159a31
RW
415 }
416 handle->k = 0;
f8262d47 417 handle->reqd_free_pages = reqd_free_pages();
51fb352b 418 handle->first_sector = handle->cur_swap;
61159a31 419 return 0;
6f612af5
JS
420err_rel:
421 release_swap_writer(handle);
422err_close:
423 swsusp_close(FMODE_WRITE);
424 return ret;
61159a31
RW
425}
426
ab954160 427static int swap_write_page(struct swap_map_handle *handle, void *buf,
343df3c7 428 struct hib_bio_batch *hb)
ab954160
AM
429{
430 int error = 0;
3aef83e0 431 sector_t offset;
61159a31
RW
432
433 if (!handle->cur)
434 return -EINVAL;
d1d241cc 435 offset = alloc_swapdev_block(root_swap);
343df3c7 436 error = write_page(buf, offset, hb);
61159a31
RW
437 if (error)
438 return error;
439 handle->cur->entries[handle->k++] = offset;
440 if (handle->k >= MAP_PAGE_ENTRIES) {
d1d241cc 441 offset = alloc_swapdev_block(root_swap);
61159a31
RW
442 if (!offset)
443 return -ENOSPC;
444 handle->cur->next_swap = offset;
343df3c7 445 error = write_page(handle->cur, handle->cur_swap, hb);
61159a31 446 if (error)
ab954160 447 goto out;
3ecb01df 448 clear_page(handle->cur);
61159a31
RW
449 handle->cur_swap = offset;
450 handle->k = 0;
5a21d489 451
343df3c7
CH
452 if (hb && low_free_pages() <= handle->reqd_free_pages) {
453 error = hib_wait_io(hb);
5a21d489
BS
454 if (error)
455 goto out;
456 /*
457 * Recalculate the number of required free pages, to
458 * make sure we never take more than half.
459 */
460 handle->reqd_free_pages = reqd_free_pages();
461 }
081a9d04 462 }
59a49335 463 out:
ab954160 464 return error;
61159a31
RW
465}
466
467static int flush_swap_writer(struct swap_map_handle *handle)
468{
469 if (handle->cur && handle->cur_swap)
ab954160 470 return write_page(handle->cur, handle->cur_swap, NULL);
61159a31
RW
471 else
472 return -EINVAL;
473}
474
6f612af5
JS
475static int swap_writer_finish(struct swap_map_handle *handle,
476 unsigned int flags, int error)
477{
478 if (!error) {
479 flush_swap_writer(handle);
480 printk(KERN_INFO "PM: S");
481 error = mark_swapfiles(handle, flags);
482 printk("|\n");
483 }
484
485 if (error)
486 free_all_swap_pages(root_swap);
487 release_swap_writer(handle);
488 swsusp_close(FMODE_WRITE);
489
490 return error;
491}
492
f996fc96
BS
493/* We need to remember how much compressed data we need to read. */
494#define LZO_HEADER sizeof(size_t)
495
496/* Number of pages/bytes we'll compress at one time. */
497#define LZO_UNC_PAGES 32
498#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
499
500/* Number of pages/bytes we need for compressed data (worst case). */
501#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
502 LZO_HEADER, PAGE_SIZE)
503#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
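/*
 * Worked example, assuming 4 KiB pages, an 8-byte size_t and the kernel's
 * lzo1x_worst_compress() bound of x + x/16 + 64 + 3: LZO_UNC_SIZE is
 * 128 KiB, the worst-case compressed size is 131072 + 8192 + 67 = 139331
 * bytes, and adding the 8-byte length header gives LZO_CMP_PAGES =
 * DIV_ROUND_UP(139339, 4096) = 35 pages, i.e. LZO_CMP_SIZE = 140 KiB.
 */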
504
081a9d04
BS
505/* Maximum number of threads for compression/decompression. */
506#define LZO_THREADS 3
507
5a21d489
BS
508/* Minimum/maximum number of pages for read buffering. */
509#define LZO_MIN_RD_PAGES 1024
510#define LZO_MAX_RD_PAGES 8192
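/*
 * With 4 KiB pages this allows between 4 MiB and 32 MiB of read-ahead;
 * the actual ring size is chosen in load_image_lzo() from the low memory
 * left over after the image pages, clamped to this range.
 */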
081a9d04
BS
511
512
61159a31
RW
513/**
514 * save_image - save the suspend image data
515 */
516
517static int save_image(struct swap_map_handle *handle,
518 struct snapshot_handle *snapshot,
3a4f7577 519 unsigned int nr_to_write)
61159a31
RW
520{
521 unsigned int m;
522 int ret;
3a4f7577 523 int nr_pages;
ab954160 524 int err2;
343df3c7 525 struct hib_bio_batch hb;
db597605
TR
526 ktime_t start;
527 ktime_t stop;
61159a31 528
343df3c7
CH
529 hib_init_batch(&hb);
530
d8150d35 531 printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
23976728 532 nr_to_write);
d8150d35 533 m = nr_to_write / 10;
61159a31
RW
534 if (!m)
535 m = 1;
536 nr_pages = 0;
db597605 537 start = ktime_get();
4ff277f9 538 while (1) {
d3c1b24c 539 ret = snapshot_read_next(snapshot);
4ff277f9
JS
540 if (ret <= 0)
541 break;
343df3c7 542 ret = swap_write_page(handle, data_of(*snapshot), &hb);
4ff277f9
JS
543 if (ret)
544 break;
545 if (!(nr_pages % m))
d8150d35
BS
546 printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
547 nr_pages / m * 10);
4ff277f9
JS
548 nr_pages++;
549 }
343df3c7 550 err2 = hib_wait_io(&hb);
db597605 551 stop = ktime_get();
4ff277f9
JS
552 if (!ret)
553 ret = err2;
554 if (!ret)
d8150d35 555 printk(KERN_INFO "PM: Image saving done.\n");
db597605 556 swsusp_show_speed(start, stop, nr_to_write, "Wrote");
4ff277f9 557 return ret;
61159a31
RW
558}
559
081a9d04
BS
560/**
561 * Structure used for CRC32.
562 */
563struct crc_data {
564 struct task_struct *thr; /* thread */
565 atomic_t ready; /* ready to start flag */
566 atomic_t stop; /* ready to stop flag */
567 unsigned run_threads; /* nr current threads */
568 wait_queue_head_t go; /* start crc update */
569 wait_queue_head_t done; /* crc update done */
570 u32 *crc32; /* points to handle's crc32 */
571 size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
572 unsigned char *unc[LZO_THREADS]; /* uncompressed data */
573};
574
575/**
576 * CRC32 update function that runs in its own thread.
577 */
578static int crc32_threadfn(void *data)
579{
580 struct crc_data *d = data;
581 unsigned i;
582
583 while (1) {
584 wait_event(d->go, atomic_read(&d->ready) ||
585 kthread_should_stop());
586 if (kthread_should_stop()) {
587 d->thr = NULL;
588 atomic_set(&d->stop, 1);
589 wake_up(&d->done);
590 break;
591 }
592 atomic_set(&d->ready, 0);
593
594 for (i = 0; i < d->run_threads; i++)
595 *d->crc32 = crc32_le(*d->crc32,
596 d->unc[i], *d->unc_len[i]);
597 atomic_set(&d->stop, 1);
598 wake_up(&d->done);
599 }
600 return 0;
601}
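/*
 * Hand-off protocol shared by the CRC32 thread above and the LZO threads
 * below (sketch of one round, as implemented by the wait_event()/wake_up()
 * pairs):
 *
 *   producer:                          worker thread:
 *     fill d->unc[]/d->unc_len           wait_event(d->go, d->ready || stop)
 *     atomic_set(&d->ready, 1)           atomic_set(&d->ready, 0)
 *     wake_up(&d->go)                    do the work (crc32/compress)
 *     ...                                atomic_set(&d->stop, 1)
 *     wait_event(d->done, d->stop)       wake_up(&d->done)
 *     atomic_set(&d->stop, 0)
 */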
602/**
603 * Structure used for LZO data compression.
604 */
605struct cmp_data {
606 struct task_struct *thr; /* thread */
607 atomic_t ready; /* ready to start flag */
608 atomic_t stop; /* ready to stop flag */
609 int ret; /* return code */
610 wait_queue_head_t go; /* start compression */
611 wait_queue_head_t done; /* compression done */
612 size_t unc_len; /* uncompressed length */
613 size_t cmp_len; /* compressed length */
614 unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
615 unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
616 unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
617};
618
619/**
620 * Compression function that runs in its own thread.
621 */
622static int lzo_compress_threadfn(void *data)
623{
624 struct cmp_data *d = data;
625
626 while (1) {
627 wait_event(d->go, atomic_read(&d->ready) ||
628 kthread_should_stop());
629 if (kthread_should_stop()) {
630 d->thr = NULL;
631 d->ret = -1;
632 atomic_set(&d->stop, 1);
633 wake_up(&d->done);
634 break;
635 }
636 atomic_set(&d->ready, 0);
637
638 d->ret = lzo1x_1_compress(d->unc, d->unc_len,
639 d->cmp + LZO_HEADER, &d->cmp_len,
640 d->wrk);
641 atomic_set(&d->stop, 1);
642 wake_up(&d->done);
643 }
644 return 0;
645}
f996fc96
BS
646
647/**
648 * save_image_lzo - Save the suspend image data compressed with LZO.
057b0a75 649 * @handle: Swap map handle to use for saving the image.
f996fc96
BS
650 * @snapshot: Image to read data from.
651 * @nr_to_write: Number of pages to save.
652 */
653static int save_image_lzo(struct swap_map_handle *handle,
654 struct snapshot_handle *snapshot,
655 unsigned int nr_to_write)
656{
657 unsigned int m;
658 int ret = 0;
659 int nr_pages;
660 int err2;
343df3c7 661 struct hib_bio_batch hb;
db597605
TR
662 ktime_t start;
663 ktime_t stop;
081a9d04
BS
664 size_t off;
665 unsigned thr, run_threads, nr_threads;
666 unsigned char *page = NULL;
667 struct cmp_data *data = NULL;
668 struct crc_data *crc = NULL;
669
343df3c7
CH
670 hib_init_batch(&hb);
671
081a9d04
BS
672 /*
673 * We limit the number of compression threads in order to bound the
674 * memory footprint.
675 */
676 nr_threads = num_online_cpus() - 1;
677 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
f996fc96
BS
678
679 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
680 if (!page) {
681 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
081a9d04
BS
682 ret = -ENOMEM;
683 goto out_clean;
f996fc96
BS
684 }
685
081a9d04
BS
686 data = vmalloc(sizeof(*data) * nr_threads);
687 if (!data) {
688 printk(KERN_ERR "PM: Failed to allocate LZO data\n");
689 ret = -ENOMEM;
690 goto out_clean;
f996fc96 691 }
081a9d04
BS
692 for (thr = 0; thr < nr_threads; thr++)
693 memset(&data[thr], 0, offsetof(struct cmp_data, go));
f996fc96 694
081a9d04
BS
695 crc = kmalloc(sizeof(*crc), GFP_KERNEL);
696 if (!crc) {
697 printk(KERN_ERR "PM: Failed to allocate crc\n");
698 ret = -ENOMEM;
699 goto out_clean;
700 }
701 memset(crc, 0, offsetof(struct crc_data, go));
702
703 /*
704 * Start the compression threads.
705 */
706 for (thr = 0; thr < nr_threads; thr++) {
707 init_waitqueue_head(&data[thr].go);
708 init_waitqueue_head(&data[thr].done);
709
710 data[thr].thr = kthread_run(lzo_compress_threadfn,
711 &data[thr],
712 "image_compress/%u", thr);
713 if (IS_ERR(data[thr].thr)) {
714 data[thr].thr = NULL;
715 printk(KERN_ERR
716 "PM: Cannot start compression threads\n");
717 ret = -ENOMEM;
718 goto out_clean;
719 }
f996fc96
BS
720 }
721
081a9d04
BS
722 /*
723 * Start the CRC32 thread.
724 */
725 init_waitqueue_head(&crc->go);
726 init_waitqueue_head(&crc->done);
727
728 handle->crc32 = 0;
729 crc->crc32 = &handle->crc32;
730 for (thr = 0; thr < nr_threads; thr++) {
731 crc->unc[thr] = data[thr].unc;
732 crc->unc_len[thr] = &data[thr].unc_len;
733 }
734
735 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
736 if (IS_ERR(crc->thr)) {
737 crc->thr = NULL;
738 printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
739 ret = -ENOMEM;
740 goto out_clean;
f996fc96
BS
741 }
742
5a21d489
BS
743 /*
744 * Adjust the number of required free pages after all allocations have
745 * been done. We don't want to run out of pages when writing.
746 */
747 handle->reqd_free_pages = reqd_free_pages();
748
f996fc96 749 printk(KERN_INFO
081a9d04 750 "PM: Using %u thread(s) for compression.\n"
d8150d35 751 "PM: Compressing and saving image data (%u pages)...\n",
081a9d04 752 nr_threads, nr_to_write);
d8150d35 753 m = nr_to_write / 10;
f996fc96
BS
754 if (!m)
755 m = 1;
756 nr_pages = 0;
db597605 757 start = ktime_get();
f996fc96 758 for (;;) {
081a9d04
BS
759 for (thr = 0; thr < nr_threads; thr++) {
760 for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
761 ret = snapshot_read_next(snapshot);
762 if (ret < 0)
763 goto out_finish;
764
765 if (!ret)
766 break;
767
768 memcpy(data[thr].unc + off,
769 data_of(*snapshot), PAGE_SIZE);
770
771 if (!(nr_pages % m))
d8150d35
BS
772 printk(KERN_INFO
773 "PM: Image saving progress: "
774 "%3d%%\n",
775 nr_pages / m * 10);
081a9d04
BS
776 nr_pages++;
777 }
778 if (!off)
f996fc96
BS
779 break;
780
081a9d04 781 data[thr].unc_len = off;
f996fc96 782
081a9d04
BS
783 atomic_set(&data[thr].ready, 1);
784 wake_up(&data[thr].go);
f996fc96
BS
785 }
786
081a9d04 787 if (!thr)
f996fc96
BS
788 break;
789
081a9d04
BS
790 crc->run_threads = thr;
791 atomic_set(&crc->ready, 1);
792 wake_up(&crc->go);
f996fc96 793
081a9d04
BS
794 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
795 wait_event(data[thr].done,
796 atomic_read(&data[thr].stop));
797 atomic_set(&data[thr].stop, 0);
f996fc96 798
081a9d04 799 ret = data[thr].ret;
f996fc96 800
081a9d04
BS
801 if (ret < 0) {
802 printk(KERN_ERR "PM: LZO compression failed\n");
803 goto out_finish;
804 }
f996fc96 805
081a9d04
BS
806 if (unlikely(!data[thr].cmp_len ||
807 data[thr].cmp_len >
808 lzo1x_worst_compress(data[thr].unc_len))) {
809 printk(KERN_ERR
810 "PM: Invalid LZO compressed length\n");
811 ret = -1;
f996fc96 812 goto out_finish;
081a9d04
BS
813 }
814
815 *(size_t *)data[thr].cmp = data[thr].cmp_len;
816
817 /*
818 * Given we are writing one page at a time to disk, we
819 * copy that much from the buffer, although the last
820 * bit will likely be smaller than a full page. This is
821 * OK - we saved the length of the compressed data, so
822 * any garbage at the end will be discarded when we
823 * read it.
824 */
825 for (off = 0;
826 off < LZO_HEADER + data[thr].cmp_len;
827 off += PAGE_SIZE) {
828 memcpy(page, data[thr].cmp + off, PAGE_SIZE);
829
343df3c7 830 ret = swap_write_page(handle, page, &hb);
081a9d04
BS
831 if (ret)
832 goto out_finish;
833 }
f996fc96 834 }
081a9d04
BS
835
836 wait_event(crc->done, atomic_read(&crc->stop));
837 atomic_set(&crc->stop, 0);
f996fc96
BS
838 }
839
840out_finish:
343df3c7 841 err2 = hib_wait_io(&hb);
db597605 842 stop = ktime_get();
f996fc96
BS
843 if (!ret)
844 ret = err2;
d8150d35
BS
845 if (!ret)
846 printk(KERN_INFO "PM: Image saving done.\n");
db597605 847 swsusp_show_speed(start, stop, nr_to_write, "Wrote");
081a9d04
BS
848out_clean:
849 if (crc) {
850 if (crc->thr)
851 kthread_stop(crc->thr);
852 kfree(crc);
853 }
854 if (data) {
855 for (thr = 0; thr < nr_threads; thr++)
856 if (data[thr].thr)
857 kthread_stop(data[thr].thr);
858 vfree(data);
859 }
860 if (page) free_page((unsigned long)page);
f996fc96
BS
861
862 return ret;
863}
864
865/**
866 * enough_swap - Make sure we have enough swap to save the image.
867 *
868 * Returns TRUE or FALSE after checking the total amount of swap
869 * space available from the resume partition.
870 */
871
f996fc96 872static int enough_swap(unsigned int nr_pages, unsigned int flags)
61159a31
RW
873{
874 unsigned int free_swap = count_swap_pages(root_swap, 1);
f996fc96 875 unsigned int required;
61159a31 876
23976728 877 pr_debug("PM: Free swap pages: %u\n", free_swap);
f996fc96 878
ee34a370 879 required = PAGES_FOR_IO + nr_pages;
f996fc96 880 return free_swap > required;
61159a31
RW
881}
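/*
 * For example, saving an uncompressed 2 GiB image with 4 KiB pages means
 * nr_pages = 524288, so the resume partition must hold strictly more than
 * 524288 + PAGES_FOR_IO free swap pages.  Note that swsusp_write() only
 * performs this check in SF_NOCOMPRESS_MODE; with LZO compression no
 * up-front check is made and running out of swap is reported as -ENOSPC
 * while writing.
 */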
882
883/**
884 * swsusp_write - Write entire image and metadata.
a634cc10 885 * @flags: flags to pass to the "boot" kernel in the image header
61159a31
RW
886 *
887 * It is important _NOT_ to umount filesystems at this point. We want
888 * them synced (in case something goes wrong) but we DO not want to mark
889 * filesystem clean: it is not. (And it does not matter, if we resume
890 * correctly, we'll mark system clean, anyway.)
891 */
892
a634cc10 893int swsusp_write(unsigned int flags)
61159a31
RW
894{
895 struct swap_map_handle handle;
896 struct snapshot_handle snapshot;
897 struct swsusp_info *header;
6f612af5 898 unsigned long pages;
61159a31
RW
899 int error;
900
6f612af5
JS
901 pages = snapshot_get_image_size();
902 error = get_swap_writer(&handle);
3aef83e0 903 if (error) {
6f612af5 904 printk(KERN_ERR "PM: Cannot get swap writer\n");
61159a31
RW
905 return error;
906 }
ee34a370
BS
907 if (flags & SF_NOCOMPRESS_MODE) {
908 if (!enough_swap(pages, flags)) {
909 printk(KERN_ERR "PM: Not enough free swap\n");
910 error = -ENOSPC;
911 goto out_finish;
912 }
6f612af5 913 }
61159a31 914 memset(&snapshot, 0, sizeof(struct snapshot_handle));
d3c1b24c 915 error = snapshot_read_next(&snapshot);
3aef83e0
RW
916 if (error < PAGE_SIZE) {
917 if (error >= 0)
918 error = -EFAULT;
919
6f612af5 920 goto out_finish;
3aef83e0 921 }
61159a31 922 header = (struct swsusp_info *)data_of(snapshot);
6f612af5 923 error = swap_write_page(&handle, header, NULL);
f996fc96
BS
924 if (!error) {
925 error = (flags & SF_NOCOMPRESS_MODE) ?
926 save_image(&handle, &snapshot, pages - 1) :
927 save_image_lzo(&handle, &snapshot, pages - 1);
928 }
6f612af5
JS
929out_finish:
930 error = swap_writer_finish(&handle, flags, error);
61159a31
RW
931 return error;
932}
933
61159a31
RW
934/**
935 * The following functions allow us to read data using a swap map
936 * in a file-like way.
937 */
938
939static void release_swap_reader(struct swap_map_handle *handle)
940{
081a9d04
BS
941 struct swap_map_page_list *tmp;
942
943 while (handle->maps) {
944 if (handle->maps->map)
945 free_page((unsigned long)handle->maps->map);
946 tmp = handle->maps;
947 handle->maps = handle->maps->next;
948 kfree(tmp);
949 }
61159a31
RW
950 handle->cur = NULL;
951}
952
6f612af5
JS
953static int get_swap_reader(struct swap_map_handle *handle,
954 unsigned int *flags_p)
61159a31
RW
955{
956 int error;
081a9d04
BS
957 struct swap_map_page_list *tmp, *last;
958 sector_t offset;
61159a31 959
6f612af5
JS
960 *flags_p = swsusp_header->flags;
961
962 if (!swsusp_header->image) /* how can this happen? */
61159a31 963 return -EINVAL;
3aef83e0 964
081a9d04
BS
965 handle->cur = NULL;
966 last = handle->maps = NULL;
967 offset = swsusp_header->image;
968 while (offset) {
969 tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
970 if (!tmp) {
971 release_swap_reader(handle);
972 return -ENOMEM;
973 }
974 memset(tmp, 0, sizeof(*tmp));
975 if (!handle->maps)
976 handle->maps = tmp;
977 if (last)
978 last->next = tmp;
979 last = tmp;
980
981 tmp->map = (struct swap_map_page *)
982 __get_free_page(__GFP_WAIT | __GFP_HIGH);
983 if (!tmp->map) {
984 release_swap_reader(handle);
985 return -ENOMEM;
986 }
3aef83e0 987
343df3c7 988 error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
081a9d04
BS
989 if (error) {
990 release_swap_reader(handle);
991 return error;
992 }
993 offset = tmp->map->next_swap;
61159a31
RW
994 }
995 handle->k = 0;
081a9d04 996 handle->cur = handle->maps->map;
61159a31
RW
997 return 0;
998}
999
546e0d27 1000static int swap_read_page(struct swap_map_handle *handle, void *buf,
343df3c7 1001 struct hib_bio_batch *hb)
61159a31 1002{
3aef83e0 1003 sector_t offset;
61159a31 1004 int error;
081a9d04 1005 struct swap_map_page_list *tmp;
61159a31
RW
1006
1007 if (!handle->cur)
1008 return -EINVAL;
1009 offset = handle->cur->entries[handle->k];
1010 if (!offset)
1011 return -EFAULT;
343df3c7 1012 error = hib_submit_io(READ_SYNC, offset, buf, hb);
61159a31
RW
1013 if (error)
1014 return error;
1015 if (++handle->k >= MAP_PAGE_ENTRIES) {
1016 handle->k = 0;
081a9d04
BS
1017 free_page((unsigned long)handle->maps->map);
1018 tmp = handle->maps;
1019 handle->maps = handle->maps->next;
1020 kfree(tmp);
1021 if (!handle->maps)
61159a31 1022 release_swap_reader(handle);
081a9d04
BS
1023 else
1024 handle->cur = handle->maps->map;
61159a31
RW
1025 }
1026 return error;
1027}
1028
6f612af5
JS
1029static int swap_reader_finish(struct swap_map_handle *handle)
1030{
1031 release_swap_reader(handle);
1032
1033 return 0;
1034}
1035
61159a31
RW
1036/**
1037 * load_image - load the image using the swap map handle
1038 * @handle and the snapshot handle @snapshot
1039 * (assuming there are @nr_to_read pages to load)
1040 */
1041
1042static int load_image(struct swap_map_handle *handle,
1043 struct snapshot_handle *snapshot,
546e0d27 1044 unsigned int nr_to_read)
61159a31
RW
1045{
1046 unsigned int m;
081a9d04 1047 int ret = 0;
db597605
TR
1048 ktime_t start;
1049 ktime_t stop;
343df3c7 1050 struct hib_bio_batch hb;
546e0d27
AM
1051 int err2;
1052 unsigned nr_pages;
61159a31 1053
343df3c7
CH
1054 hib_init_batch(&hb);
1055
d8150d35 1056 printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
23976728 1057 nr_to_read);
d8150d35 1058 m = nr_to_read / 10;
61159a31
RW
1059 if (!m)
1060 m = 1;
1061 nr_pages = 0;
db597605 1062 start = ktime_get();
546e0d27 1063 for ( ; ; ) {
081a9d04
BS
1064 ret = snapshot_write_next(snapshot);
1065 if (ret <= 0)
546e0d27 1066 break;
343df3c7 1067 ret = swap_read_page(handle, data_of(*snapshot), &hb);
081a9d04 1068 if (ret)
546e0d27
AM
1069 break;
1070 if (snapshot->sync_read)
343df3c7 1071 ret = hib_wait_io(&hb);
081a9d04 1072 if (ret)
546e0d27
AM
1073 break;
1074 if (!(nr_pages % m))
d8150d35
BS
1075 printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
1076 nr_pages / m * 10);
546e0d27
AM
1077 nr_pages++;
1078 }
343df3c7 1079 err2 = hib_wait_io(&hb);
db597605 1080 stop = ktime_get();
081a9d04
BS
1081 if (!ret)
1082 ret = err2;
1083 if (!ret) {
d8150d35 1084 printk(KERN_INFO "PM: Image loading done.\n");
8357376d 1085 snapshot_write_finalize(snapshot);
e655a250 1086 if (!snapshot_image_loaded(snapshot))
081a9d04 1087 ret = -ENODATA;
d8150d35 1088 }
db597605 1089 swsusp_show_speed(start, stop, nr_to_read, "Read");
081a9d04
BS
1090 return ret;
1091}
1092
1093/**
1094 * Structure used for LZO data decompression.
1095 */
1096struct dec_data {
1097 struct task_struct *thr; /* thread */
1098 atomic_t ready; /* ready to start flag */
1099 atomic_t stop; /* ready to stop flag */
1100 int ret; /* return code */
1101 wait_queue_head_t go; /* start decompression */
1102 wait_queue_head_t done; /* decompression done */
1103 size_t unc_len; /* uncompressed length */
1104 size_t cmp_len; /* compressed length */
1105 unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
1106 unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
1107};
1108
1109/**
1110 * Decompression function that runs in its own thread.
1111 */
1112static int lzo_decompress_threadfn(void *data)
1113{
1114 struct dec_data *d = data;
1115
1116 while (1) {
1117 wait_event(d->go, atomic_read(&d->ready) ||
1118 kthread_should_stop());
1119 if (kthread_should_stop()) {
1120 d->thr = NULL;
1121 d->ret = -1;
1122 atomic_set(&d->stop, 1);
1123 wake_up(&d->done);
1124 break;
1125 }
1126 atomic_set(&d->ready, 0);
1127
1128 d->unc_len = LZO_UNC_SIZE;
1129 d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
1130 d->unc, &d->unc_len);
1131 atomic_set(&d->stop, 1);
1132 wake_up(&d->done);
1133 }
1134 return 0;
61159a31
RW
1135}
1136
f996fc96
BS
1137/**
1138 * load_image_lzo - Load compressed image data and decompress them with LZO.
1139 * @handle: Swap map handle to use for loading data.
1140 * @snapshot: Image to copy uncompressed data into.
1141 * @nr_to_read: Number of pages to load.
1142 */
1143static int load_image_lzo(struct swap_map_handle *handle,
1144 struct snapshot_handle *snapshot,
1145 unsigned int nr_to_read)
1146{
1147 unsigned int m;
081a9d04
BS
1148 int ret = 0;
1149 int eof = 0;
343df3c7 1150 struct hib_bio_batch hb;
db597605
TR
1151 ktime_t start;
1152 ktime_t stop;
f996fc96 1153 unsigned nr_pages;
081a9d04
BS
1154 size_t off;
1155 unsigned i, thr, run_threads, nr_threads;
1156 unsigned ring = 0, pg = 0, ring_size = 0,
1157 have = 0, want, need, asked = 0;
5a21d489 1158 unsigned long read_pages = 0;
081a9d04
BS
1159 unsigned char **page = NULL;
1160 struct dec_data *data = NULL;
1161 struct crc_data *crc = NULL;
1162
343df3c7
CH
1163 hib_init_batch(&hb);
1164
081a9d04
BS
1165 /*
1166 * We limit the number of decompression threads in order to bound the
1167 * memory footprint.
1168 */
1169 nr_threads = num_online_cpus() - 1;
1170 nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
1171
5a21d489 1172 page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
081a9d04
BS
1173 if (!page) {
1174 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
1175 ret = -ENOMEM;
1176 goto out_clean;
1177 }
9f339caf 1178
081a9d04
BS
1179 data = vmalloc(sizeof(*data) * nr_threads);
1180 if (!data) {
1181 printk(KERN_ERR "PM: Failed to allocate LZO data\n");
1182 ret = -ENOMEM;
1183 goto out_clean;
1184 }
1185 for (thr = 0; thr < nr_threads; thr++)
1186 memset(&data[thr], 0, offsetof(struct dec_data, go));
9f339caf 1187
081a9d04
BS
1188 crc = kmalloc(sizeof(*crc), GFP_KERNEL);
1189 if (!crc) {
1190 printk(KERN_ERR "PM: Failed to allocate crc\n");
1191 ret = -ENOMEM;
1192 goto out_clean;
1193 }
1194 memset(crc, 0, offsetof(struct crc_data, go));
1195
1196 /*
1197 * Start the decompression threads.
1198 */
1199 for (thr = 0; thr < nr_threads; thr++) {
1200 init_waitqueue_head(&data[thr].go);
1201 init_waitqueue_head(&data[thr].done);
1202
1203 data[thr].thr = kthread_run(lzo_decompress_threadfn,
1204 &data[thr],
1205 "image_decompress/%u", thr);
1206 if (IS_ERR(data[thr].thr)) {
1207 data[thr].thr = NULL;
1208 printk(KERN_ERR
1209 "PM: Cannot start decompression threads\n");
1210 ret = -ENOMEM;
1211 goto out_clean;
9f339caf 1212 }
f996fc96
BS
1213 }
1214
081a9d04
BS
1215 /*
1216 * Start the CRC32 thread.
1217 */
1218 init_waitqueue_head(&crc->go);
1219 init_waitqueue_head(&crc->done);
1220
1221 handle->crc32 = 0;
1222 crc->crc32 = &handle->crc32;
1223 for (thr = 0; thr < nr_threads; thr++) {
1224 crc->unc[thr] = data[thr].unc;
1225 crc->unc_len[thr] = &data[thr].unc_len;
f996fc96
BS
1226 }
1227
081a9d04
BS
1228 crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
1229 if (IS_ERR(crc->thr)) {
1230 crc->thr = NULL;
1231 printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
1232 ret = -ENOMEM;
1233 goto out_clean;
1234 }
9f339caf 1235
081a9d04 1236 /*
5a21d489
BS
1237 * Set the number of pages for read buffering.
1238 * This is complete guesswork, because we'll only know the real
1239 * picture once prepare_image() is called, which is much later on
1240 * during the image load phase. We'll assume the worst case and
1241 * say that none of the image pages are from high memory.
081a9d04 1242 */
5a21d489
BS
1243 if (low_free_pages() > snapshot_get_image_size())
1244 read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
1245 read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
9f339caf 1246
081a9d04
BS
1247 for (i = 0; i < read_pages; i++) {
1248 page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
1249 __GFP_WAIT | __GFP_HIGH :
5a21d489
BS
1250 __GFP_WAIT | __GFP_NOWARN |
1251 __GFP_NORETRY);
1252
081a9d04
BS
1253 if (!page[i]) {
1254 if (i < LZO_CMP_PAGES) {
1255 ring_size = i;
1256 printk(KERN_ERR
1257 "PM: Failed to allocate LZO pages\n");
1258 ret = -ENOMEM;
1259 goto out_clean;
1260 } else {
1261 break;
1262 }
1263 }
f996fc96 1264 }
081a9d04 1265 want = ring_size = i;
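	/*
	 * page[] now acts as a circular buffer of ring_size page-sized slots:
	 * 'ring' is the producer index that swap_read_page() fills, 'pg' is
	 * the consumer index the decompressor drains, 'have' counts completed
	 * pages not yet consumed, 'asked' counts reads still in flight, and
	 * 'want' is how many more slots may be submitted.  'eof' becomes 1 at
	 * end of data and 2 once the remaining in-flight reads have been
	 * waited for.
	 */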
f996fc96
BS
1266
1267 printk(KERN_INFO
081a9d04 1268 "PM: Using %u thread(s) for decompression.\n"
d8150d35 1269 "PM: Loading and decompressing image data (%u pages)...\n",
081a9d04 1270 nr_threads, nr_to_read);
d8150d35 1271 m = nr_to_read / 10;
f996fc96
BS
1272 if (!m)
1273 m = 1;
1274 nr_pages = 0;
db597605 1275 start = ktime_get();
f996fc96 1276
081a9d04
BS
1277 ret = snapshot_write_next(snapshot);
1278 if (ret <= 0)
f996fc96
BS
1279 goto out_finish;
1280
081a9d04
BS
1281 for(;;) {
1282 for (i = 0; !eof && i < want; i++) {
343df3c7 1283 ret = swap_read_page(handle, page[ring], &hb);
081a9d04
BS
1284 if (ret) {
1285 /*
1286 * On real read error, finish. On end of data,
1287 * set EOF flag and just exit the read loop.
1288 */
1289 if (handle->cur &&
1290 handle->cur->entries[handle->k]) {
1291 goto out_finish;
1292 } else {
1293 eof = 1;
1294 break;
1295 }
1296 }
1297 if (++ring >= ring_size)
1298 ring = 0;
f996fc96 1299 }
081a9d04
BS
1300 asked += i;
1301 want -= i;
f996fc96 1302
081a9d04
BS
1303 /*
1304 * We are out of data, wait for some more.
1305 */
1306 if (!have) {
1307 if (!asked)
1308 break;
1309
343df3c7 1310 ret = hib_wait_io(&hb);
081a9d04 1311 if (ret)
f996fc96 1312 goto out_finish;
081a9d04
BS
1313 have += asked;
1314 asked = 0;
1315 if (eof)
1316 eof = 2;
9f339caf 1317 }
f996fc96 1318
081a9d04
BS
1319 if (crc->run_threads) {
1320 wait_event(crc->done, atomic_read(&crc->stop));
1321 atomic_set(&crc->stop, 0);
1322 crc->run_threads = 0;
f996fc96
BS
1323 }
1324
081a9d04
BS
1325 for (thr = 0; have && thr < nr_threads; thr++) {
1326 data[thr].cmp_len = *(size_t *)page[pg];
1327 if (unlikely(!data[thr].cmp_len ||
1328 data[thr].cmp_len >
1329 lzo1x_worst_compress(LZO_UNC_SIZE))) {
1330 printk(KERN_ERR
1331 "PM: Invalid LZO compressed length\n");
1332 ret = -1;
1333 goto out_finish;
1334 }
1335
1336 need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
1337 PAGE_SIZE);
1338 if (need > have) {
1339 if (eof > 1) {
1340 ret = -1;
1341 goto out_finish;
1342 }
1343 break;
1344 }
1345
1346 for (off = 0;
1347 off < LZO_HEADER + data[thr].cmp_len;
1348 off += PAGE_SIZE) {
1349 memcpy(data[thr].cmp + off,
1350 page[pg], PAGE_SIZE);
1351 have--;
1352 want++;
1353 if (++pg >= ring_size)
1354 pg = 0;
1355 }
1356
1357 atomic_set(&data[thr].ready, 1);
1358 wake_up(&data[thr].go);
f996fc96
BS
1359 }
1360
081a9d04
BS
1361 /*
1362 * Wait for more data while we are decompressing.
1363 */
1364 if (have < LZO_CMP_PAGES && asked) {
343df3c7 1365 ret = hib_wait_io(&hb);
081a9d04
BS
1366 if (ret)
1367 goto out_finish;
1368 have += asked;
1369 asked = 0;
1370 if (eof)
1371 eof = 2;
f996fc96
BS
1372 }
1373
081a9d04
BS
1374 for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
1375 wait_event(data[thr].done,
1376 atomic_read(&data[thr].stop));
1377 atomic_set(&data[thr].stop, 0);
1378
1379 ret = data[thr].ret;
f996fc96 1380
081a9d04
BS
1381 if (ret < 0) {
1382 printk(KERN_ERR
1383 "PM: LZO decompression failed\n");
1384 goto out_finish;
1385 }
f996fc96 1386
081a9d04
BS
1387 if (unlikely(!data[thr].unc_len ||
1388 data[thr].unc_len > LZO_UNC_SIZE ||
1389 data[thr].unc_len & (PAGE_SIZE - 1))) {
1390 printk(KERN_ERR
1391 "PM: Invalid LZO uncompressed length\n");
1392 ret = -1;
f996fc96 1393 goto out_finish;
081a9d04
BS
1394 }
1395
1396 for (off = 0;
1397 off < data[thr].unc_len; off += PAGE_SIZE) {
1398 memcpy(data_of(*snapshot),
1399 data[thr].unc + off, PAGE_SIZE);
1400
1401 if (!(nr_pages % m))
d8150d35
BS
1402 printk(KERN_INFO
1403 "PM: Image loading progress: "
1404 "%3d%%\n",
1405 nr_pages / m * 10);
081a9d04
BS
1406 nr_pages++;
1407
1408 ret = snapshot_write_next(snapshot);
1409 if (ret <= 0) {
1410 crc->run_threads = thr + 1;
1411 atomic_set(&crc->ready, 1);
1412 wake_up(&crc->go);
1413 goto out_finish;
1414 }
1415 }
f996fc96 1416 }
081a9d04
BS
1417
1418 crc->run_threads = thr;
1419 atomic_set(&crc->ready, 1);
1420 wake_up(&crc->go);
f996fc96
BS
1421 }
1422
1423out_finish:
081a9d04
BS
1424 if (crc->run_threads) {
1425 wait_event(crc->done, atomic_read(&crc->stop));
1426 atomic_set(&crc->stop, 0);
1427 }
db597605 1428 stop = ktime_get();
081a9d04 1429 if (!ret) {
d8150d35 1430 printk(KERN_INFO "PM: Image loading done.\n");
f996fc96
BS
1431 snapshot_write_finalize(snapshot);
1432 if (!snapshot_image_loaded(snapshot))
081a9d04
BS
1433 ret = -ENODATA;
1434 if (!ret) {
1435 if (swsusp_header->flags & SF_CRC32_MODE) {
1436 if(handle->crc32 != swsusp_header->crc32) {
1437 printk(KERN_ERR
1438 "PM: Invalid image CRC32!\n");
1439 ret = -ENODATA;
1440 }
1441 }
1442 }
d8150d35 1443 }
db597605 1444 swsusp_show_speed(start, stop, nr_to_read, "Read");
081a9d04
BS
1445out_clean:
1446 for (i = 0; i < ring_size; i++)
9f339caf 1447 free_page((unsigned long)page[i]);
081a9d04
BS
1448 if (crc) {
1449 if (crc->thr)
1450 kthread_stop(crc->thr);
1451 kfree(crc);
1452 }
1453 if (data) {
1454 for (thr = 0; thr < nr_threads; thr++)
1455 if (data[thr].thr)
1456 kthread_stop(data[thr].thr);
1457 vfree(data);
1458 }
6c45de0d 1459 vfree(page);
f996fc96 1460
081a9d04 1461 return ret;
f996fc96
BS
1462}
1463
a634cc10
RW
1464/**
1465 * swsusp_read - read the hibernation image.
1466 * @flags_p: flags passed by the "frozen" kernel in the image header are
1467 * written into this memory location
a634cc10
RW
1468 */
1469
1470int swsusp_read(unsigned int *flags_p)
61159a31
RW
1471{
1472 int error;
1473 struct swap_map_handle handle;
1474 struct snapshot_handle snapshot;
1475 struct swsusp_info *header;
1476
61159a31 1477 memset(&snapshot, 0, sizeof(struct snapshot_handle));
d3c1b24c 1478 error = snapshot_write_next(&snapshot);
61159a31
RW
1479 if (error < PAGE_SIZE)
1480 return error < 0 ? error : -EFAULT;
1481 header = (struct swsusp_info *)data_of(snapshot);
6f612af5
JS
1482 error = get_swap_reader(&handle, flags_p);
1483 if (error)
1484 goto end;
61159a31 1485 if (!error)
546e0d27 1486 error = swap_read_page(&handle, header, NULL);
f996fc96
BS
1487 if (!error) {
1488 error = (*flags_p & SF_NOCOMPRESS_MODE) ?
1489 load_image(&handle, &snapshot, header->pages - 1) :
1490 load_image_lzo(&handle, &snapshot, header->pages - 1);
1491 }
6f612af5
JS
1492 swap_reader_finish(&handle);
1493end:
61159a31 1494 if (!error)
23976728 1495 pr_debug("PM: Image successfully loaded\n");
61159a31 1496 else
23976728 1497 pr_debug("PM: Error %d resuming\n", error);
61159a31
RW
1498 return error;
1499}
1500
1501/**
1502 * swsusp_check - Check for swsusp signature in the resume device
1503 */
1504
1505int swsusp_check(void)
1506{
1507 int error;
1508
d4d77629
TH
1509 hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
1510 FMODE_READ, NULL);
8a0d613f
JS
1511 if (!IS_ERR(hib_resume_bdev)) {
1512 set_blocksize(hib_resume_bdev, PAGE_SIZE);
3ecb01df 1513 clear_page(swsusp_header);
343df3c7 1514 error = hib_submit_io(READ_SYNC, swsusp_resume_block,
1b29c164 1515 swsusp_header, NULL);
9a154d9d 1516 if (error)
76b57e61 1517 goto put;
9a154d9d 1518
3624eb04 1519 if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
1b29c164 1520 memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
61159a31 1521 /* Reset swap signature now */
343df3c7 1522 error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
1b29c164 1523 swsusp_header, NULL);
61159a31 1524 } else {
76b57e61 1525 error = -EINVAL;
61159a31 1526 }
76b57e61
JS
1527
1528put:
61159a31 1529 if (error)
8a0d613f 1530 blkdev_put(hib_resume_bdev, FMODE_READ);
61159a31 1531 else
d0941ead 1532 pr_debug("PM: Image signature found, resuming\n");
61159a31 1533 } else {
8a0d613f 1534 error = PTR_ERR(hib_resume_bdev);
61159a31
RW
1535 }
1536
1537 if (error)
d0941ead 1538 pr_debug("PM: Image not found (code %d)\n", error);
61159a31
RW
1539
1540 return error;
1541}
1542
1543/**
1544 * swsusp_close - close swap device.
1545 */
1546
c2dd0dae 1547void swsusp_close(fmode_t mode)
61159a31 1548{
8a0d613f 1549 if (IS_ERR(hib_resume_bdev)) {
23976728 1550 pr_debug("PM: Image device not initialised\n");
61159a31
RW
1551 return;
1552 }
1553
8a0d613f 1554 blkdev_put(hib_resume_bdev, mode);
61159a31 1555}
1b29c164 1556
62c552cc
BS
1557/**
1558 * swsusp_unmark - Unmark swsusp signature in the resume device
1559 */
1560
1561#ifdef CONFIG_SUSPEND
1562int swsusp_unmark(void)
1563{
1564 int error;
1565
343df3c7 1566 hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
62c552cc
BS
1567 if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
1568 memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
343df3c7 1569 error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
62c552cc
BS
1570 swsusp_header, NULL);
1571 } else {
1572 printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
1573 error = -ENODEV;
1574 }
1575
1576 /*
1577 * We just returned from suspend, we don't need the image any more.
1578 */
1579 free_all_swap_pages(root_swap);
1580
1581 return error;
1582}
1583#endif
1584
1b29c164
VG
1585static int swsusp_header_init(void)
1586{
1587 swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
1588 if (!swsusp_header)
1589 panic("Could not allocate memory for swsusp_header\n");
1590 return 0;
1591}
1592
1593core_initcall(swsusp_header_init);