nilfs2: clean up old e-mail addresses
fs/nilfs2/recovery.c
/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
        NILFS_SEG_VALID,
        NILFS_SEG_NO_SUPER_ROOT,
        NILFS_SEG_FAIL_IO,
        NILFS_SEG_FAIL_MAGIC,
        NILFS_SEG_FAIL_SEQ,
        NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
        NILFS_SEG_FAIL_CHECKSUM_FULL,
        NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
        ino_t ino;              /* Inode number of the file that this block
                                 * belongs to */
        sector_t blocknr;       /* block number */
        __u64 vblocknr;         /* virtual block number */
        unsigned long blkoff;   /* File offset of the data block (per block) */
        struct list_head list;
};


static int nilfs_warn_segment_error(int err)
{
        switch (err) {
        case NILFS_SEG_FAIL_IO:
                printk(KERN_WARNING
                       "NILFS warning: I/O error on loading last segment\n");
                return -EIO;
        case NILFS_SEG_FAIL_MAGIC:
                printk(KERN_WARNING
                       "NILFS warning: Segment magic number invalid\n");
                break;
        case NILFS_SEG_FAIL_SEQ:
                printk(KERN_WARNING
                       "NILFS warning: Sequence number mismatch\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in super root\n");
                break;
        case NILFS_SEG_FAIL_CHECKSUM_FULL:
                printk(KERN_WARNING
                       "NILFS warning: Checksum error in segment payload\n");
                break;
        case NILFS_SEG_FAIL_CONSISTENCY:
                printk(KERN_WARNING
                       "NILFS warning: Inconsistent segment\n");
                break;
        case NILFS_SEG_NO_SUPER_ROOT:
                printk(KERN_WARNING
                       "NILFS warning: No super root in the last segment\n");
                break;
        }
        return -EINVAL;
}

/**
 * nilfs_compute_checksum - compute checksum of blocks continuously
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
                                  struct buffer_head *bhs, u32 *sum,
                                  unsigned long offset, u64 check_bytes,
                                  sector_t start, unsigned long nblock)
{
        unsigned int blocksize = nilfs->ns_blocksize;
        unsigned long size;
        u32 crc;

        BUG_ON(offset >= blocksize);
        check_bytes -= offset;
        size = min_t(u64, check_bytes, blocksize - offset);
        crc = crc32_le(nilfs->ns_crc_seed,
                       (unsigned char *)bhs->b_data + offset, size);
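        /*
         * Accumulate the CRC over any remaining blocks, reading each
         * one synchronously from the block device.
         */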
        if (--nblock > 0) {
                do {
                        struct buffer_head *bh;

                        bh = __bread(nilfs->ns_bdev, ++start, blocksize);
                        if (!bh)
                                return -EIO;
                        check_bytes -= size;
                        size = min_t(u64, check_bytes, blocksize);
                        crc = crc32_le(crc, bh->b_data, size);
                        brelse(bh);
                } while (--nblock > 0);
        }
        *sum = crc;
        return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
                                struct buffer_head **pbh, int check)
{
        struct buffer_head *bh_sr;
        struct nilfs_super_root *sr;
        u32 crc;
        int ret;

        *pbh = NULL;
        bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
        if (unlikely(!bh_sr)) {
                ret = NILFS_SEG_FAIL_IO;
                goto failed;
        }

        sr = (struct nilfs_super_root *)bh_sr->b_data;
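        /*
         * When a CRC check is requested, verify the checksum stored in
         * sr_sum; it covers the first sr_bytes bytes of the super root,
         * excluding the checksum field itself.
         */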
        if (check) {
                unsigned bytes = le16_to_cpu(sr->sr_bytes);

                if (bytes == 0 || bytes > nilfs->ns_blocksize) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
                if (nilfs_compute_checksum(
                            nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
                            sr_block, 1)) {
                        ret = NILFS_SEG_FAIL_IO;
                        goto failed_bh;
                }
                if (crc != le32_to_cpu(sr->sr_sum)) {
                        ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
                        goto failed_bh;
                }
        }
        *pbh = bh_sr;
        return 0;

 failed_bh:
        brelse(bh_sr);

 failed:
        return nilfs_warn_segment_error(ret);
}

/**
 * nilfs_read_log_header - read summary header of the specified log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: pointer to return segment summary structure
 */
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
                      struct nilfs_segment_summary **sum)
{
        struct buffer_head *bh_sum;

        bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
        if (bh_sum)
                *sum = (struct nilfs_segment_summary *)bh_sum->b_data;
        return bh_sum;
}

/**
 * nilfs_validate_log - verify consistency of log
 * @nilfs: nilfs object
 * @seg_seq: sequence number of segment
 * @bh_sum: buffer head of summary block
 * @sum: segment summary struct
 */
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
                              struct buffer_head *bh_sum,
                              struct nilfs_segment_summary *sum)
{
        unsigned long nblock;
        u32 crc;
        int ret;

        ret = NILFS_SEG_FAIL_MAGIC;
        if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
                goto out;

        ret = NILFS_SEG_FAIL_SEQ;
        if (le64_to_cpu(sum->ss_seq) != seg_seq)
                goto out;

        nblock = le32_to_cpu(sum->ss_nblocks);
        ret = NILFS_SEG_FAIL_CONSISTENCY;
        if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
                /* This limits the number of blocks read in the CRC check */
                goto out;

        ret = NILFS_SEG_FAIL_IO;
        if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
                                   ((u64)nblock << nilfs->ns_blocksize_bits),
                                   bh_sum->b_blocknr, nblock))
                goto out;

        ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
        if (crc != le32_to_cpu(sum->ss_datasum))
                goto out;
        ret = 0;
 out:
        return ret;
}

/**
 * nilfs_read_summary_info - read an item on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be read
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
                                     struct buffer_head **pbh,
                                     unsigned int *offset, unsigned int bytes)
{
        void *ptr;
        sector_t blocknr;

        BUG_ON((*pbh)->b_size < *offset);
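        /*
         * If the next item does not fit in the current summary block,
         * advance to the following block; items never straddle a block
         * boundary.
         */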
        if (bytes > (*pbh)->b_size - *offset) {
                blocknr = (*pbh)->b_blocknr;
                brelse(*pbh);
                *pbh = __bread(nilfs->ns_bdev, blocknr + 1,
                               nilfs->ns_blocksize);
                if (unlikely(!*pbh))
                        return NULL;
                *offset = 0;
        }
        ptr = (*pbh)->b_data + *offset;
        *offset += bytes;
        return ptr;
}

/**
 * nilfs_skip_summary_info - skip items on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be skipped
 * @count: number of items to be skipped
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
                                    struct buffer_head **pbh,
                                    unsigned int *offset, unsigned int bytes,
                                    unsigned long count)
{
        unsigned int rest_item_in_current_block
                = ((*pbh)->b_size - *offset) / bytes;

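        /*
         * Skip within the current summary block when possible; otherwise
         * work out how many whole blocks to jump over and where the new
         * offset lands in the last of them.
         */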
        if (count <= rest_item_in_current_block) {
                *offset += bytes * count;
        } else {
                sector_t blocknr = (*pbh)->b_blocknr;
                unsigned int nitem_per_block = (*pbh)->b_size / bytes;
                unsigned int bcnt;

                count -= rest_item_in_current_block;
                bcnt = DIV_ROUND_UP(count, nitem_per_block);
                *offset = bytes * (count - (bcnt - 1) * nitem_per_block);

                brelse(*pbh);
                *pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
                               nilfs->ns_blocksize);
        }
}

/**
 * nilfs_scan_dsync_log - get block information of a log written for data sync
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information
 * @head: list head to add nilfs_recovery_block struct
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
                                struct nilfs_segment_summary *sum,
                                struct list_head *head)
{
        struct buffer_head *bh;
        unsigned int offset;
        u32 nfinfo, sumbytes;
        sector_t blocknr;
        ino_t ino;
        int err = -EIO;

        nfinfo = le32_to_cpu(sum->ss_nfinfo);
        if (!nfinfo)
                return 0;

        sumbytes = le32_to_cpu(sum->ss_sumbytes);
        blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
        bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
        if (unlikely(!bh))
                goto out;

        offset = le16_to_cpu(sum->ss_bytes);
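        /*
         * Walk the per-file finfo entries in the summary area; the data
         * blocks they describe follow the summary blocks on disk,
         * starting at 'blocknr'.
         */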
        for (;;) {
                unsigned long nblocks, ndatablk, nnodeblk;
                struct nilfs_finfo *finfo;

                finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
                                                sizeof(*finfo));
                if (unlikely(!finfo))
                        goto out;

                ino = le64_to_cpu(finfo->fi_ino);
                nblocks = le32_to_cpu(finfo->fi_nblocks);
                ndatablk = le32_to_cpu(finfo->fi_ndatablk);
                nnodeblk = nblocks - ndatablk;

                while (ndatablk-- > 0) {
                        struct nilfs_recovery_block *rb;
                        struct nilfs_binfo_v *binfo;

                        binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
                                                        sizeof(*binfo));
                        if (unlikely(!binfo))
                                goto out;

                        rb = kmalloc(sizeof(*rb), GFP_NOFS);
                        if (unlikely(!rb)) {
                                err = -ENOMEM;
                                goto out;
                        }
                        rb->ino = ino;
                        rb->blocknr = blocknr++;
                        rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
                        rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
                        /* INIT_LIST_HEAD(&rb->list); */
                        list_add_tail(&rb->list, head);
                }
                if (--nfinfo == 0)
                        break;
                blocknr += nnodeblk; /* always 0 for data sync logs */
                nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
                                        nnodeblk);
                if (unlikely(!bh))
                        goto out;
        }
        err = 0;
 out:
        brelse(bh); /* brelse(NULL) is just ignored */
        return err;
}

static void dispose_recovery_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_recovery_block *rb;

                rb = list_first_entry(head, struct nilfs_recovery_block, list);
                list_del(&rb->list);
                kfree(rb);
        }
}

struct nilfs_segment_entry {
        struct list_head list;
        __u64 segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
        struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

        if (unlikely(!ent))
                return -ENOMEM;

        ent->segnum = segnum;
        INIT_LIST_HEAD(&ent->list);
        list_add_tail(&ent->list, head);
        return 0;
}

void nilfs_dispose_segment_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct nilfs_segment_entry *ent;

                ent = list_first_entry(head, struct nilfs_segment_entry, list);
                list_del(&ent->list);
                kfree(ent);
        }
}

static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
                                              struct super_block *sb,
                                              struct nilfs_recovery_info *ri)
{
        struct list_head *head = &ri->ri_used_segments;
        struct nilfs_segment_entry *ent, *n;
        struct inode *sufile = nilfs->ns_sufile;
        __u64 segnum[4];
        int err;
        int i;

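        /*
         * The four segments of interest: the segment holding the latest
         * super root, its successor, and the segment/successor pair
         * recorded during the log scan.
         */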
        segnum[0] = nilfs->ns_segnum;
        segnum[1] = nilfs->ns_nextnum;
        segnum[2] = ri->ri_segnum;
        segnum[3] = ri->ri_nextnum;

        /*
         * Releasing the next segment of the latest super root.
         * The next segment is invalidated by this recovery.
         */
        err = nilfs_sufile_free(sufile, segnum[1]);
        if (unlikely(err))
                goto failed;

        for (i = 1; i < 4; i++) {
                err = nilfs_segment_list_add(head, segnum[i]);
                if (unlikely(err))
                        goto failed;
        }

        /*
         * Collecting segments written after the latest super root.
         * These are marked dirty to avoid being reallocated in the next write.
         */
        list_for_each_entry_safe(ent, n, head, list) {
                if (ent->segnum != segnum[0]) {
                        err = nilfs_sufile_scrap(sufile, ent->segnum);
                        if (unlikely(err))
                                goto failed;
                }
                list_del(&ent->list);
                kfree(ent);
        }

        /* Allocate new segments for recovery */
        err = nilfs_sufile_alloc(sufile, &segnum[0]);
        if (unlikely(err))
                goto failed;

        nilfs->ns_pseg_offset = 0;
        nilfs->ns_seg_seq = ri->ri_seq + 2;
        nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
        /* No need to recover sufile because it will be destroyed on error */
        return err;
}

static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
                                     struct nilfs_recovery_block *rb,
                                     struct page *page)
{
        struct buffer_head *bh_org;
        void *kaddr;

        bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
        if (unlikely(!bh_org))
                return -EIO;

        kaddr = kmap_atomic(page);
        memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
        kunmap_atomic(kaddr);
        brelse(bh_org);
        return 0;
}

static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
                                      struct super_block *sb,
                                      struct nilfs_root *root,
                                      struct list_head *head,
                                      unsigned long *nr_salvaged_blocks)
{
        struct inode *inode;
        struct nilfs_recovery_block *rb, *n;
        unsigned blocksize = nilfs->ns_blocksize;
        struct page *page;
        loff_t pos;
        int err = 0, err2 = 0;

        list_for_each_entry_safe(rb, n, head, list) {
                inode = nilfs_iget(sb, root, rb->ino);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        inode = NULL;
                        goto failed_inode;
                }

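                /*
                 * Re-create the block in the page cache of the inode and
                 * mark the file dirty, so that the following segment
                 * construction writes it out to a new on-disk location.
                 */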
                pos = rb->blkoff << inode->i_blkbits;
                err = block_write_begin(inode->i_mapping, pos, blocksize,
                                        0, &page, nilfs_get_block);
                if (unlikely(err)) {
                        loff_t isize = inode->i_size;
                        if (pos + blocksize > isize)
                                nilfs_write_failed(inode->i_mapping,
                                                   pos + blocksize);
                        goto failed_inode;
                }

                err = nilfs_recovery_copy_block(nilfs, rb, page);
                if (unlikely(err))
                        goto failed_page;

                err = nilfs_set_file_dirty(inode, 1);
                if (unlikely(err))
                        goto failed_page;

                block_write_end(NULL, inode->i_mapping, pos, blocksize,
                                blocksize, page, NULL);

                unlock_page(page);
                put_page(page);

                (*nr_salvaged_blocks)++;
                goto next;

 failed_page:
                unlock_page(page);
                put_page(page);

 failed_inode:
                printk(KERN_WARNING
                       "NILFS warning: error recovering data block "
                       "(err=%d, ino=%lu, block-offset=%llu)\n",
                       err, (unsigned long)rb->ino,
                       (unsigned long long)rb->blkoff);
                if (!err2)
                        err2 = err;
 next:
                iput(inode); /* iput(NULL) is just ignored */
                list_del_init(&rb->list);
                kfree(rb);
        }
        return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root object
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
                                 struct super_block *sb,
                                 struct nilfs_root *root,
                                 struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh_sum = NULL;
        struct nilfs_segment_summary *sum = NULL;
        sector_t pseg_start;
        sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
        unsigned long nsalvaged_blocks = 0;
        unsigned int flags;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        int empty_seg = 0;
        int err = 0, ret;
        LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
        enum {
                RF_INIT_ST,
                RF_DSYNC_ST,  /* scanning data-sync segments */
        };
        int state = RF_INIT_ST;

        pseg_start = ri->ri_lsegs_start;
        seg_seq = ri->ri_lsegs_start_seq;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
                brelse(bh_sum);
                bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
                if (!bh_sum) {
                        err = -EIO;
                        goto failed;
                }

                ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO) {
                                err = -EIO;
                                goto failed;
                        }
                        goto strayed;
                }

                flags = le16_to_cpu(sum->ss_flags);
                if (flags & NILFS_SS_SR)
                        goto confused;

                /* Found a valid partial segment; do recovery actions */
                nextnum = nilfs_get_segnum_of_block(nilfs,
                                                    le64_to_cpu(sum->ss_next));
                empty_seg = 0;
                nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
                if (!(flags & NILFS_SS_GC))
                        nilfs->ns_nongc_ctime = nilfs->ns_ctime;

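                /*
                 * Only logs written for data sync (NILFS_SS_SYNDT) are
                 * replayed here; a run of such logs is collected from
                 * LOGBGN and applied once LOGEND is seen.
                 */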
                switch (state) {
                case RF_INIT_ST:
                        if (!(flags & NILFS_SS_LOGBGN) ||
                            !(flags & NILFS_SS_SYNDT))
                                goto try_next_pseg;
                        state = RF_DSYNC_ST;
                        /* Fall through */
                case RF_DSYNC_ST:
                        if (!(flags & NILFS_SS_SYNDT))
                                goto confused;

                        err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
                                                   &dsync_blocks);
                        if (unlikely(err))
                                goto failed;
                        if (flags & NILFS_SS_LOGEND) {
                                err = nilfs_recover_dsync_blocks(
                                        nilfs, sb, root, &dsync_blocks,
                                        &nsalvaged_blocks);
                                if (unlikely(err))
                                        goto failed;
                                state = RF_INIT_ST;
                        }
                        break; /* Fall through to try_next_pseg */
                }

 try_next_pseg:
                if (pseg_start == ri->ri_lsegs_end)
                        break;
                pseg_start += le32_to_cpu(sum->ss_nblocks);
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 strayed:
                if (pseg_start == ri->ri_lsegs_end)
                        break;

 feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        break;
                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

        if (nsalvaged_blocks) {
                printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
                       sb->s_id, nsalvaged_blocks);
                ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
        }
 out:
        brelse(bh_sum);
        dispose_recovery_list(&dsync_blocks);
        return err;

 confused:
        err = -EINVAL;
 failed:
        printk(KERN_ERR
               "NILFS (device %s): Error roll-forwarding "
               "(err=%d, pseg block=%llu). ",
               sb->s_id, err, (unsigned long long)pseg_start);
        goto out;
}

static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
                                      struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh;
        int err;

        if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
            nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
                return;

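        /*
         * The start block of the salvaged logs is zeroed out only when it
         * falls in the same full segment as the latest super root;
         * segments other than that one have already been scrapped by
         * nilfs_prepare_segment_for_recovery().
         */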
        bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
        BUG_ON(!bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);
        if (unlikely(err))
                printk(KERN_WARNING
                       "NILFS warning: buffer sync write failed during "
                       "post-cleaning of recovery.\n");
        brelse(bh);
}

/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error code is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
                              struct super_block *sb,
                              struct nilfs_recovery_info *ri)
{
        struct nilfs_root *root;
        int err;

        if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
                return 0;

        err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
        if (unlikely(err)) {
                printk(KERN_ERR
                       "NILFS: error loading the latest checkpoint.\n");
                return err;
        }

        err = nilfs_do_roll_forward(nilfs, sb, root, ri);
        if (unlikely(err))
                goto failed;

        if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
                err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
                if (unlikely(err)) {
                        printk(KERN_ERR "NILFS: Error preparing segments for "
                               "recovery.\n");
                        goto failed;
                }

                err = nilfs_attach_log_writer(sb, root);
                if (unlikely(err))
                        goto failed;

                set_nilfs_discontinued(nilfs);
                err = nilfs_construct_segment(sb);
                nilfs_detach_log_writer(sb);

                if (unlikely(err)) {
                        printk(KERN_ERR "NILFS: Oops! recovery failed. "
                               "(err=%d)\n", err);
                        goto failed;
                }

                nilfs_finish_roll_forward(nilfs, ri);
        }

 failed:
        nilfs_put_root(root);
        return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed by the superblock. It sets up struct the_nilfs through
 * this search. It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error code is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
                            struct nilfs_recovery_info *ri)
{
        struct buffer_head *bh_sum = NULL;
        struct nilfs_segment_summary *sum = NULL;
        sector_t pseg_start, pseg_end, sr_pseg_start = 0;
        sector_t seg_start, seg_end;  /* range of full segment (block number) */
        sector_t b, end;
        unsigned long nblocks;
        unsigned int flags;
        u64 seg_seq;
        __u64 segnum, nextnum = 0;
        __u64 cno;
        LIST_HEAD(segments);
        int empty_seg = 0, scan_newer = 0;
        int ret;

        pseg_start = nilfs->ns_last_pseg;
        seg_seq = nilfs->ns_last_seq;
        cno = nilfs->ns_last_cno;
        segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

        /* Calculate range of segment */
        nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

        /* Read ahead segment */
        b = seg_start;
        while (b <= seg_end)
                __breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

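        /*
         * Walk the chain of partial segments starting from the last
         * position recorded in the superblock, remembering the newest
         * super root found and any trailing logs that will need roll
         * forward.
         */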
        for (;;) {
                brelse(bh_sum);
                ret = NILFS_SEG_FAIL_IO;
                bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
                if (!bh_sum)
                        goto failed;

                ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
                if (ret) {
                        if (ret == NILFS_SEG_FAIL_IO)
                                goto failed;
                        goto strayed;
                }

                nblocks = le32_to_cpu(sum->ss_nblocks);
                pseg_end = pseg_start + nblocks - 1;
                if (unlikely(pseg_end > seg_end)) {
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto strayed;
                }

                /* A valid partial segment */
                ri->ri_pseg_start = pseg_start;
                ri->ri_seq = seg_seq;
                ri->ri_segnum = segnum;
                nextnum = nilfs_get_segnum_of_block(nilfs,
                                                    le64_to_cpu(sum->ss_next));
                ri->ri_nextnum = nextnum;
                empty_seg = 0;

                flags = le16_to_cpu(sum->ss_flags);
                if (!(flags & NILFS_SS_SR) && !scan_newer) {
                        /* This will never happen because a superblock
                         * (last_segment) always points to a pseg
                         * having a super root. */
                        ret = NILFS_SEG_FAIL_CONSISTENCY;
                        goto failed;
                }

                if (pseg_start == seg_start) {
                        nilfs_get_segment_range(nilfs, nextnum, &b, &end);
                        while (b <= end)
                                __breadahead(nilfs->ns_bdev, b++,
                                             nilfs->ns_blocksize);
                }
                if (!(flags & NILFS_SS_SR)) {
                        if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
                                ri->ri_lsegs_start = pseg_start;
                                ri->ri_lsegs_start_seq = seg_seq;
                        }
                        if (flags & NILFS_SS_LOGEND)
                                ri->ri_lsegs_end = pseg_start;
                        goto try_next_pseg;
                }

                /* A valid super root was found. */
                ri->ri_cno = cno++;
                ri->ri_super_root = pseg_end;
                ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

                nilfs_dispose_segment_list(&segments);
                sr_pseg_start = pseg_start;
                nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
                nilfs->ns_seg_seq = seg_seq;
                nilfs->ns_segnum = segnum;
                nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
                nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
                nilfs->ns_nextnum = nextnum;

                if (scan_newer)
                        ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
                else {
                        if (nilfs->ns_mount_state & NILFS_VALID_FS)
                                goto super_root_found;
                        scan_newer = 1;
                }

 try_next_pseg:
                /* Standing on a course, or met an inconsistent state */
                pseg_start += nblocks;
                if (pseg_start < seg_end)
                        continue;
                goto feed_segment;

 strayed:
                /* Off the trail */
                if (!scan_newer)
                        /*
                         * This can happen if a checkpoint was written without
                         * barriers, or as a result of an I/O failure.
                         */
                        goto failed;

 feed_segment:
                /* Looking to the next full segment */
                if (empty_seg++)
                        goto super_root_found; /* found a valid super root */

                ret = nilfs_segment_list_add(&segments, segnum);
                if (unlikely(ret))
                        goto failed;

                seg_seq++;
                segnum = nextnum;
                nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
                pseg_start = seg_start;
        }

 super_root_found:
        /* Updating pointers relating to the latest checkpoint */
        brelse(bh_sum);
        list_splice_tail(&segments, &ri->ri_used_segments);
        nilfs->ns_last_pseg = sr_pseg_start;
        nilfs->ns_last_seq = nilfs->ns_seg_seq;
        nilfs->ns_last_cno = ri->ri_cno;
        return 0;

 failed:
        brelse(bh_sum);
        nilfs_dispose_segment_list(&segments);
        return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}