xfs: log recovery lsn ordering needs uuid check
fs/xfs/xfs_log_recover.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_error.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_btree.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_inode_item.h"
36 #include "xfs_alloc.h"
37 #include "xfs_ialloc.h"
38 #include "xfs_log_priv.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_log_recover.h"
41 #include "xfs_extfree_item.h"
42 #include "xfs_trans_priv.h"
43 #include "xfs_quota.h"
44 #include "xfs_cksum.h"
45 #include "xfs_trace.h"
46 #include "xfs_icache.h"
47 #include "xfs_icreate_item.h"
48
49 /* Need all the magic numbers and buffer ops structures from these headers */
50 #include "xfs_symlink.h"
51 #include "xfs_da_btree.h"
52 #include "xfs_dir2_format.h"
53 #include "xfs_dir2.h"
54 #include "xfs_attr_leaf.h"
55 #include "xfs_attr_remote.h"
56
57 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
58
59 STATIC int
60 xlog_find_zeroed(
61 struct xlog *,
62 xfs_daddr_t *);
63 STATIC int
64 xlog_clear_stale_blocks(
65 struct xlog *,
66 xfs_lsn_t);
67 #if defined(DEBUG)
68 STATIC void
69 xlog_recover_check_summary(
70 struct xlog *);
71 #else
72 #define xlog_recover_check_summary(log)
73 #endif
74
75 /*
76 * This structure is used during recovery to record the buf log items which
77 * have been canceled and should not be replayed.
78 */
79 struct xfs_buf_cancel {
80 xfs_daddr_t bc_blkno;
81 uint bc_len;
82 int bc_refcount;
83 struct list_head bc_list;
84 };
85
86 /*
87 * Sector aligned buffer routines for buffer create/read/write/access
88 */
89
90 /*
91 * Verify that the given count of basic blocks is a valid number of blocks
92 * to specify for an operation involving the given XFS log buffer.
93 * Returns nonzero if the count is valid, 0 otherwise.
94 */
95
96 static inline int
97 xlog_buf_bbcount_valid(
98 struct xlog *log,
99 int bbcount)
100 {
101 return bbcount > 0 && bbcount <= log->l_logBBsize;
102 }
103
104 /*
105 * Allocate a buffer to hold log data. The buffer needs to be able
106 * to map to a range of nbblks basic blocks at any valid (basic
107 * block) offset within the log.
108 */
109 STATIC xfs_buf_t *
110 xlog_get_bp(
111 struct xlog *log,
112 int nbblks)
113 {
114 struct xfs_buf *bp;
115
116 if (!xlog_buf_bbcount_valid(log, nbblks)) {
117 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
118 nbblks);
119 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
120 return NULL;
121 }
122
123 /*
124 * We do log I/O in units of log sectors (a power-of-2
125 * multiple of the basic block size), so we round up the
126 * requested size to accommodate the basic blocks required
127 * for complete log sectors.
128 *
129 * In addition, the buffer may be used for a non-sector-
130 * aligned block offset, in which case an I/O of the
131 * requested size could extend beyond the end of the
132 * buffer. If the requested size is only 1 basic block it
133 * will never straddle a sector boundary, so this won't be
134 * an issue. Nor will this be a problem if the log I/O is
135 * done in basic blocks (sector size 1). But otherwise we
136 * extend the buffer by one extra log sector to ensure
137 * there's space to accommodate this possibility.
138 */
139 if (nbblks > 1 && log->l_sectBBsize > 1)
140 nbblks += log->l_sectBBsize;
141 nbblks = round_up(nbblks, log->l_sectBBsize);
142
143 bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
144 if (bp)
145 xfs_buf_unlock(bp);
146 return bp;
147 }
148
149 STATIC void
150 xlog_put_bp(
151 xfs_buf_t *bp)
152 {
153 xfs_buf_free(bp);
154 }
155
156 /*
157 * Return the address of the start of the given block number's data
158 * in a log buffer. The buffer covers a log sector-aligned region.
159 */
160 STATIC xfs_caddr_t
161 xlog_align(
162 struct xlog *log,
163 xfs_daddr_t blk_no,
164 int nbblks,
165 struct xfs_buf *bp)
166 {
167 xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);
168
169 ASSERT(offset + nbblks <= bp->b_length);
170 return bp->b_addr + BBTOB(offset);
171 }
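
/*
 * Editorial sketch (not part of the kernel build): the sector-rounding
 * arithmetic used by xlog_get_bp(), xlog_bread_noalign() and xlog_align()
 * above, reproduced in standalone C. The XLOG_RECOVERY_EXAMPLES guard and
 * the example_* names are hypothetical; the sector size in basic blocks
 * is assumed to be a power of two, as the log code requires.
 */
#ifdef XLOG_RECOVERY_EXAMPLES
#include <assert.h>
#include <stdint.h>

static uint64_t example_round_down(uint64_t x, uint64_t align)
{
	return x & ~(align - 1);		/* power-of-two align only */
}

static uint64_t example_round_up(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

static void example_sector_math(void)
{
	uint64_t sectbb = 8;	/* 4k sectors on 512-byte basic blocks */
	uint64_t blk_no = 11, nbblks = 3;

	/* I/O covers the whole sectors spanning [blk_no, blk_no + nbblks) */
	uint64_t io_start = example_round_down(blk_no, sectbb);	/* 8 */
	uint64_t io_len = example_round_up(nbblks, sectbb);	/* 8 */

	/* xlog_align(): data starts this many basic blocks into the buffer */
	uint64_t offset = blk_no & (sectbb - 1);		/* 3 */

	assert(io_start == 8 && io_len == 8 && offset == 3);
}
#endif /* XLOG_RECOVERY_EXAMPLES */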
172
173
174 /*
175 * nbblks should be uint, but oh well. Just want to catch that 32-bit length.
176 */
177 STATIC int
178 xlog_bread_noalign(
179 struct xlog *log,
180 xfs_daddr_t blk_no,
181 int nbblks,
182 struct xfs_buf *bp)
183 {
184 int error;
185
186 if (!xlog_buf_bbcount_valid(log, nbblks)) {
187 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
188 nbblks);
189 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
190 return EFSCORRUPTED;
191 }
192
193 blk_no = round_down(blk_no, log->l_sectBBsize);
194 nbblks = round_up(nbblks, log->l_sectBBsize);
195
196 ASSERT(nbblks > 0);
197 ASSERT(nbblks <= bp->b_length);
198
199 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
200 XFS_BUF_READ(bp);
201 bp->b_io_length = nbblks;
202 bp->b_error = 0;
203
204 xfsbdstrat(log->l_mp, bp);
205 error = xfs_buf_iowait(bp);
206 if (error)
207 xfs_buf_ioerror_alert(bp, __func__);
208 return error;
209 }
210
211 STATIC int
212 xlog_bread(
213 struct xlog *log,
214 xfs_daddr_t blk_no,
215 int nbblks,
216 struct xfs_buf *bp,
217 xfs_caddr_t *offset)
218 {
219 int error;
220
221 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
222 if (error)
223 return error;
224
225 *offset = xlog_align(log, blk_no, nbblks, bp);
226 return 0;
227 }
228
229 /*
230 * Read at an offset into the buffer. Returns with the buffer in its original
231 * state regardless of the result of the read.
232 */
233 STATIC int
234 xlog_bread_offset(
235 struct xlog *log,
236 xfs_daddr_t blk_no, /* block to read from */
237 int nbblks, /* blocks to read */
238 struct xfs_buf *bp,
239 xfs_caddr_t offset)
240 {
241 xfs_caddr_t orig_offset = bp->b_addr;
242 int orig_len = BBTOB(bp->b_length);
243 int error, error2;
244
245 error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
246 if (error)
247 return error;
248
249 error = xlog_bread_noalign(log, blk_no, nbblks, bp);
250
251 /* must reset buffer pointer even on error */
252 error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
253 if (error)
254 return error;
255 return error2;
256 }
257
258 /*
259 * Write out the buffer at the given block for the given number of blocks.
260 * The buffer is kept locked across the write and is returned locked.
261 * This can only be used for synchronous log writes.
262 */
263 STATIC int
264 xlog_bwrite(
265 struct xlog *log,
266 xfs_daddr_t blk_no,
267 int nbblks,
268 struct xfs_buf *bp)
269 {
270 int error;
271
272 if (!xlog_buf_bbcount_valid(log, nbblks)) {
273 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
274 nbblks);
275 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
276 return EFSCORRUPTED;
277 }
278
279 blk_no = round_down(blk_no, log->l_sectBBsize);
280 nbblks = round_up(nbblks, log->l_sectBBsize);
281
282 ASSERT(nbblks > 0);
283 ASSERT(nbblks <= bp->b_length);
284
285 XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
286 XFS_BUF_ZEROFLAGS(bp);
287 xfs_buf_hold(bp);
288 xfs_buf_lock(bp);
289 bp->b_io_length = nbblks;
290 bp->b_error = 0;
291
292 error = xfs_bwrite(bp);
293 if (error)
294 xfs_buf_ioerror_alert(bp, __func__);
295 xfs_buf_relse(bp);
296 return error;
297 }
298
299 #ifdef DEBUG
300 /*
301 * dump debug superblock and log record information
302 */
303 STATIC void
304 xlog_header_check_dump(
305 xfs_mount_t *mp,
306 xlog_rec_header_t *head)
307 {
308 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n",
309 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
310 xfs_debug(mp, " log : uuid = %pU, fmt = %d\n",
311 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
312 }
313 #else
314 #define xlog_header_check_dump(mp, head)
315 #endif
316
317 /*
318 * check log record header for recovery
319 */
320 STATIC int
321 xlog_header_check_recover(
322 xfs_mount_t *mp,
323 xlog_rec_header_t *head)
324 {
325 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
326
327 /*
328 * IRIX doesn't write the h_fmt field and leaves it zeroed
329 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
330 * a dirty log created in IRIX.
331 */
332 if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
333 xfs_warn(mp,
334 "dirty log written in incompatible format - can't recover");
335 xlog_header_check_dump(mp, head);
336 XFS_ERROR_REPORT("xlog_header_check_recover(1)",
337 XFS_ERRLEVEL_HIGH, mp);
338 return XFS_ERROR(EFSCORRUPTED);
339 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
340 xfs_warn(mp,
341 "dirty log entry has mismatched uuid - can't recover");
342 xlog_header_check_dump(mp, head);
343 XFS_ERROR_REPORT("xlog_header_check_recover(2)",
344 XFS_ERRLEVEL_HIGH, mp);
345 return XFS_ERROR(EFSCORRUPTED);
346 }
347 return 0;
348 }
349
350 /*
351 * read the head block of the log and check the header
352 */
353 STATIC int
354 xlog_header_check_mount(
355 xfs_mount_t *mp,
356 xlog_rec_header_t *head)
357 {
358 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
359
360 if (uuid_is_nil(&head->h_fs_uuid)) {
361 /*
362 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
363 * h_fs_uuid is nil, we assume this log was last mounted
364 * by IRIX and continue.
365 */
366 xfs_warn(mp, "nil uuid in log - IRIX style log");
367 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
368 xfs_warn(mp, "log has mismatched uuid - can't recover");
369 xlog_header_check_dump(mp, head);
370 XFS_ERROR_REPORT("xlog_header_check_mount",
371 XFS_ERRLEVEL_HIGH, mp);
372 return XFS_ERROR(EFSCORRUPTED);
373 }
374 return 0;
375 }
376
377 STATIC void
378 xlog_recover_iodone(
379 struct xfs_buf *bp)
380 {
381 if (bp->b_error) {
382 /*
383 * We're not going to bother about retrying
384 * this during recovery. One strike!
385 */
386 xfs_buf_ioerror_alert(bp, __func__);
387 xfs_force_shutdown(bp->b_target->bt_mount,
388 SHUTDOWN_META_IO_ERROR);
389 }
390 bp->b_iodone = NULL;
391 xfs_buf_ioend(bp, 0);
392 }
393
394 /*
395 * This routine finds (to an approximation) the first block in the physical
396 * log which contains the given cycle. It uses a binary search algorithm.
397 * Note that the algorithm cannot be perfect because the disk will not
398 * necessarily be perfect.
399 */
400 STATIC int
401 xlog_find_cycle_start(
402 struct xlog *log,
403 struct xfs_buf *bp,
404 xfs_daddr_t first_blk,
405 xfs_daddr_t *last_blk,
406 uint cycle)
407 {
408 xfs_caddr_t offset;
409 xfs_daddr_t mid_blk;
410 xfs_daddr_t end_blk;
411 uint mid_cycle;
412 int error;
413
414 end_blk = *last_blk;
415 mid_blk = BLK_AVG(first_blk, end_blk);
416 while (mid_blk != first_blk && mid_blk != end_blk) {
417 error = xlog_bread(log, mid_blk, 1, bp, &offset);
418 if (error)
419 return error;
420 mid_cycle = xlog_get_cycle(offset);
421 if (mid_cycle == cycle)
422 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
423 else
424 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
425 mid_blk = BLK_AVG(first_blk, end_blk);
426 }
427 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
428 (mid_blk == end_blk && mid_blk-1 == first_blk));
429
430 *last_blk = end_blk;
431
432 return 0;
433 }
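
/*
 * Editorial sketch (hypothetical, not part of the kernel build): the
 * binary search above, run over an in-memory cycle array. The log looks
 * like "x x x x-1 x-1" when the head lies mid-log; given that block 0
 * does not have the target cycle and the last block does, the search
 * narrows [first, end] until the two are adjacent, exactly as
 * xlog_find_cycle_start() does, and returns the first matching block.
 */
#ifdef XLOG_RECOVERY_EXAMPLES
static int example_find_cycle_start(const unsigned *cycles, int nblocks,
				    unsigned cycle)
{
	int first = 0, end = nblocks - 1;
	int mid = (first + end) >> 1;			/* BLK_AVG() */

	while (mid != first && mid != end) {
		if (cycles[mid] == cycle)
			end = mid;	/* match: keep searching left */
		else
			first = mid;	/* no match: answer is right of mid */
		mid = (first + end) >> 1;
	}
	return end;		/* first block stamped with 'cycle' */
}

/* example_find_cycle_start((unsigned[]){5,5,5,4,4,4,4,4}, 8, 4) == 3 */
#endif /* XLOG_RECOVERY_EXAMPLES */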
434
435 /*
436 * Check that a range of blocks does not contain stop_on_cycle_no.
437 * Fill in *new_blk with the block offset where such a block is
438 * found, or with -1 (an invalid block number) if there is no such
439 * block in the range. The scan needs to occur from front to back
440 * and the pointer into the region must be updated since a later
441 * routine will need to perform another test.
442 */
443 STATIC int
444 xlog_find_verify_cycle(
445 struct xlog *log,
446 xfs_daddr_t start_blk,
447 int nbblks,
448 uint stop_on_cycle_no,
449 xfs_daddr_t *new_blk)
450 {
451 xfs_daddr_t i, j;
452 uint cycle;
453 xfs_buf_t *bp;
454 xfs_daddr_t bufblks;
455 xfs_caddr_t buf = NULL;
456 int error = 0;
457
458 /*
459 * Greedily allocate a buffer big enough to handle the full
460 * range of basic blocks we'll be examining. If that fails,
461 * try a smaller size. We need to be able to read at least
462 * a log sector, or we're out of luck.
463 */
464 bufblks = 1 << ffs(nbblks);
465 while (bufblks > log->l_logBBsize)
466 bufblks >>= 1;
467 while (!(bp = xlog_get_bp(log, bufblks))) {
468 bufblks >>= 1;
469 if (bufblks < log->l_sectBBsize)
470 return ENOMEM;
471 }
472
473 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
474 int bcount;
475
476 bcount = min(bufblks, (start_blk + nbblks - i));
477
478 error = xlog_bread(log, i, bcount, bp, &buf);
479 if (error)
480 goto out;
481
482 for (j = 0; j < bcount; j++) {
483 cycle = xlog_get_cycle(buf);
484 if (cycle == stop_on_cycle_no) {
485 *new_blk = i+j;
486 goto out;
487 }
488
489 buf += BBSIZE;
490 }
491 }
492
493 *new_blk = -1;
494
495 out:
496 xlog_put_bp(bp);
497 return error;
498 }
499
500 /*
501 * Potentially back up over a partial log record write.
502 *
503 * In the typical case, last_blk is the number of the block directly after
504 * a good log record. Therefore, we subtract one to get the block number
505 * of the last block in the given buffer. extra_bblks contains the number
506 * of blocks we would have read on a previous read. This happens when the
507 * last log record is split over the end of the physical log.
508 *
509 * extra_bblks is the number of blocks potentially verified on a previous
510 * call to this routine.
511 */
512 STATIC int
513 xlog_find_verify_log_record(
514 struct xlog *log,
515 xfs_daddr_t start_blk,
516 xfs_daddr_t *last_blk,
517 int extra_bblks)
518 {
519 xfs_daddr_t i;
520 xfs_buf_t *bp;
521 xfs_caddr_t offset = NULL;
522 xlog_rec_header_t *head = NULL;
523 int error = 0;
524 int smallmem = 0;
525 int num_blks = *last_blk - start_blk;
526 int xhdrs;
527
528 ASSERT(start_blk != 0 || *last_blk != start_blk);
529
530 if (!(bp = xlog_get_bp(log, num_blks))) {
531 if (!(bp = xlog_get_bp(log, 1)))
532 return ENOMEM;
533 smallmem = 1;
534 } else {
535 error = xlog_bread(log, start_blk, num_blks, bp, &offset);
536 if (error)
537 goto out;
538 offset += ((num_blks - 1) << BBSHIFT);
539 }
540
541 for (i = (*last_blk) - 1; i >= 0; i--) {
542 if (i < start_blk) {
543 /* valid log record not found */
544 xfs_warn(log->l_mp,
545 "Log inconsistent (didn't find previous header)");
546 ASSERT(0);
547 error = XFS_ERROR(EIO);
548 goto out;
549 }
550
551 if (smallmem) {
552 error = xlog_bread(log, i, 1, bp, &offset);
553 if (error)
554 goto out;
555 }
556
557 head = (xlog_rec_header_t *)offset;
558
559 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
560 break;
561
562 if (!smallmem)
563 offset -= BBSIZE;
564 }
565
566 /*
567 * We hit the beginning of the physical log & still no header. Return
568 * to caller. If caller can handle a return of -1, then this routine
569 * will be called again for the end of the physical log.
570 */
571 if (i == -1) {
572 error = -1;
573 goto out;
574 }
575
576 /*
577 * We have the final block of the good log (the first block
578 * of the log record _before_ the head), so we check the uuid.
579 */
580 if ((error = xlog_header_check_mount(log->l_mp, head)))
581 goto out;
582
583 /*
584 * We may have found a log record header before we expected one.
585 * last_blk will be the 1st block # with a given cycle #. We may end
586 * up reading an entire log record. In this case, we don't want to
587 * reset last_blk. Only when last_blk points in the middle of a log
588 * record do we update last_blk.
589 */
590 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
591 uint h_size = be32_to_cpu(head->h_size);
592
593 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
594 if (h_size % XLOG_HEADER_CYCLE_SIZE)
595 xhdrs++;
596 } else {
597 xhdrs = 1;
598 }
599
600 if (*last_blk - i + extra_bblks !=
601 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
602 *last_blk = i;
603
604 out:
605 xlog_put_bp(bp);
606 return error;
607 }
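
/*
 * Editorial sketch (hypothetical, not part of the kernel build): the
 * extended-header count computed above is just a ceiling division of
 * h_size by XLOG_HEADER_CYCLE_SIZE (32k), since each log record header
 * block can only carry the cycle data for 32k of log space.
 */
#ifdef XLOG_RECOVERY_EXAMPLES
static int example_xhdrs(int h_size, int cycle_size)
{
	return (h_size + cycle_size - 1) / cycle_size;	/* ceil division */
}

/* example_xhdrs(32768, 32768) == 1, example_xhdrs(65536, 32768) == 2 */
#endif /* XLOG_RECOVERY_EXAMPLES */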
608
609 /*
610 * Head is defined to be the point of the log where the next log write
611 * could go. This means that incomplete LR writes at the end are
612 * eliminated when calculating the head. We aren't guaranteed that previous
613 * LRs have complete transactions. We only know that a cycle number of
614 * current cycle number -1 won't be present in the log if we start writing
615 * from our current block number.
616 *
617 * last_blk contains the block number of the first block with a given
618 * cycle number.
619 *
620 * Return: zero if normal, non-zero if error.
621 */
622 STATIC int
623 xlog_find_head(
624 struct xlog *log,
625 xfs_daddr_t *return_head_blk)
626 {
627 xfs_buf_t *bp;
628 xfs_caddr_t offset;
629 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
630 int num_scan_bblks;
631 uint first_half_cycle, last_half_cycle;
632 uint stop_on_cycle;
633 int error, log_bbnum = log->l_logBBsize;
634
635 /* Is the end of the log device zeroed? */
636 if ((error = xlog_find_zeroed(log, &first_blk)) == -1) {
637 *return_head_blk = first_blk;
638
639 /* Is the whole lot zeroed? */
640 if (!first_blk) {
641 /* Linux XFS shouldn't generate totally zeroed logs -
642 * mkfs etc. write a dummy unmount record to a fresh
643 * log so we can store the uuid in there
644 */
645 xfs_warn(log->l_mp, "totally zeroed log");
646 }
647
648 return 0;
649 } else if (error) {
650 xfs_warn(log->l_mp, "empty log check failed");
651 return error;
652 }
653
654 first_blk = 0; /* get cycle # of 1st block */
655 bp = xlog_get_bp(log, 1);
656 if (!bp)
657 return ENOMEM;
658
659 error = xlog_bread(log, 0, 1, bp, &offset);
660 if (error)
661 goto bp_err;
662
663 first_half_cycle = xlog_get_cycle(offset);
664
665 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
666 error = xlog_bread(log, last_blk, 1, bp, &offset);
667 if (error)
668 goto bp_err;
669
670 last_half_cycle = xlog_get_cycle(offset);
671 ASSERT(last_half_cycle != 0);
672
673 /*
674 * If the 1st half cycle number is equal to the last half cycle number,
675 * then the entire log is stamped with the same cycle number. In this
676 * case, head_blk can't be set to zero (which makes sense). The below
677 * math doesn't work out properly with head_blk equal to zero. Instead,
678 * we set it to log_bbnum which is an invalid block number, but this
679 * value makes the math correct. If head_blk doesn't change through
680 * all the tests below, *head_blk is set to zero at the very end rather
681 * than log_bbnum. In a sense, log_bbnum and zero are the same block
682 * in a circular file.
683 */
684 if (first_half_cycle == last_half_cycle) {
685 /*
686 * In this case we believe that the entire log should have
687 * cycle number last_half_cycle. We need to scan backwards
688 * from the end verifying that there are no holes still
689 * containing last_half_cycle - 1. If we find such a hole,
690 * then the start of that hole will be the new head. The
691 * simple case looks like
692 * x | x ... | x - 1 | x
693 * Another case that fits this picture would be
694 * x | x + 1 | x ... | x
695 * In this case the head really is somewhere at the end of the
696 * log, as one of the latest writes at the beginning was
697 * incomplete.
698 * One more case is
699 * x | x + 1 | x ... | x - 1 | x
700 * This is really the combination of the above two cases, and
701 * the head has to end up at the start of the x-1 hole at the
702 * end of the log.
703 *
704 * In the 256k log case, we will read from the beginning to the
705 * end of the log and search for cycle numbers equal to x-1.
706 * We don't worry about the x+1 blocks that we encounter,
707 * because we know that they cannot be the head since the log
708 * started with x.
709 */
710 head_blk = log_bbnum;
711 stop_on_cycle = last_half_cycle - 1;
712 } else {
713 /*
714 * In this case we want to find the first block with cycle
715 * number matching last_half_cycle. We expect the log to be
716 * some variation on
717 * x + 1 ... | x ... | x
718 * The first block with cycle number x (last_half_cycle) will
719 * be where the new head belongs. First we do a binary search
720 * for the first occurrence of last_half_cycle. The binary
721 * search may not be totally accurate, so then we scan back
722 * from there looking for occurrences of last_half_cycle before
723 * us. If that backwards scan wraps around the beginning of
724 * the log, then we look for occurrences of last_half_cycle - 1
725 * at the end of the log. The cases we're looking for look
726 * like
727 * v binary search stopped here
728 * x + 1 ... | x | x + 1 | x ... | x
729 * ^ but we want to locate this spot
730 * or
731 * <---------> less than scan distance
732 * x + 1 ... | x ... | x - 1 | x
733 * ^ we want to locate this spot
734 */
735 stop_on_cycle = last_half_cycle;
736 if ((error = xlog_find_cycle_start(log, bp, first_blk,
737 &head_blk, last_half_cycle)))
738 goto bp_err;
739 }
740
741 /*
742 * Now validate the answer. Scan back some number of maximum possible
743 * blocks and make sure each one has the expected cycle number. The
744 * maximum is determined by the total possible amount of buffering
745 * in the in-core log. The following number can be made tighter if
746 * we actually look at the block size of the filesystem.
747 */
748 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
749 if (head_blk >= num_scan_bblks) {
750 /*
751 * We are guaranteed that the entire check can be performed
752 * in one buffer.
753 */
754 start_blk = head_blk - num_scan_bblks;
755 if ((error = xlog_find_verify_cycle(log,
756 start_blk, num_scan_bblks,
757 stop_on_cycle, &new_blk)))
758 goto bp_err;
759 if (new_blk != -1)
760 head_blk = new_blk;
761 } else { /* need to read 2 parts of log */
762 /*
763 * We are going to scan backwards in the log in two parts.
764 * First we scan the physical end of the log. In this part
765 * of the log, we are looking for blocks with cycle number
766 * last_half_cycle - 1.
767 * If we find one, then we know that the log starts there, as
768 * we've found a hole that didn't get written in going around
769 * the end of the physical log. The simple case for this is
770 * x + 1 ... | x ... | x - 1 | x
771 * <---------> less than scan distance
772 * If all of the blocks at the end of the log have cycle number
773 * last_half_cycle, then we check the blocks at the start of
774 * the log looking for occurrences of last_half_cycle. If we
775 * find one, then our current estimate for the location of the
776 * first occurrence of last_half_cycle is wrong and we move
777 * back to the hole we've found. This case looks like
778 * x + 1 ... | x | x + 1 | x ...
779 * ^ binary search stopped here
780 * Another case we need to handle that only occurs in 256k
781 * logs is
782 * x + 1 ... | x ... | x+1 | x ...
783 * ^ binary search stops here
784 * In a 256k log, the scan at the end of the log will see the
785 * x + 1 blocks. We need to skip past those since that is
786 * certainly not the head of the log. By searching for
787 * last_half_cycle-1 we accomplish that.
788 */
789 ASSERT(head_blk <= INT_MAX &&
790 (xfs_daddr_t) num_scan_bblks >= head_blk);
791 start_blk = log_bbnum - (num_scan_bblks - head_blk);
792 if ((error = xlog_find_verify_cycle(log, start_blk,
793 num_scan_bblks - (int)head_blk,
794 (stop_on_cycle - 1), &new_blk)))
795 goto bp_err;
796 if (new_blk != -1) {
797 head_blk = new_blk;
798 goto validate_head;
799 }
800
801 /*
802 * Scan beginning of log now. The last part of the physical
803 * log is good. This scan needs to verify that it doesn't find
804 * the last_half_cycle.
805 */
806 start_blk = 0;
807 ASSERT(head_blk <= INT_MAX);
808 if ((error = xlog_find_verify_cycle(log,
809 start_blk, (int)head_blk,
810 stop_on_cycle, &new_blk)))
811 goto bp_err;
812 if (new_blk != -1)
813 head_blk = new_blk;
814 }
815
816 validate_head:
817 /*
818 * Now we need to make sure head_blk is not pointing to a block in
819 * the middle of a log record.
820 */
821 num_scan_bblks = XLOG_REC_SHIFT(log);
822 if (head_blk >= num_scan_bblks) {
823 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
824
825 /* start ptr at last block ptr before head_blk */
826 if ((error = xlog_find_verify_log_record(log, start_blk,
827 &head_blk, 0)) == -1) {
828 error = XFS_ERROR(EIO);
829 goto bp_err;
830 } else if (error)
831 goto bp_err;
832 } else {
833 start_blk = 0;
834 ASSERT(head_blk <= INT_MAX);
835 if ((error = xlog_find_verify_log_record(log, start_blk,
836 &head_blk, 0)) == -1) {
837 /* We hit the beginning of the log during our search */
838 start_blk = log_bbnum - (num_scan_bblks - head_blk);
839 new_blk = log_bbnum;
840 ASSERT(start_blk <= INT_MAX &&
841 (xfs_daddr_t) log_bbnum-start_blk >= 0);
842 ASSERT(head_blk <= INT_MAX);
843 if ((error = xlog_find_verify_log_record(log,
844 start_blk, &new_blk,
845 (int)head_blk)) == -1) {
846 error = XFS_ERROR(EIO);
847 goto bp_err;
848 } else if (error)
849 goto bp_err;
850 if (new_blk != log_bbnum)
851 head_blk = new_blk;
852 } else if (error)
853 goto bp_err;
854 }
855
856 xlog_put_bp(bp);
857 if (head_blk == log_bbnum)
858 *return_head_blk = 0;
859 else
860 *return_head_blk = head_blk;
861 /*
862 * When returning here, we have a good block number. Bad block
863 * means that during a previous crash, we didn't have a clean break
864 * from cycle number N to cycle number N-1. In this case, we need
865 * to find the first block with cycle number N-1.
866 */
867 return 0;
868
869 bp_err:
870 xlog_put_bp(bp);
871
872 if (error)
873 xfs_warn(log->l_mp, "failed to find log head");
874 return error;
875 }
876
877 /*
878 * Find the sync block number or the tail of the log.
879 *
880 * This will be the block number of the last record to have its
881 * associated buffers synced to disk. Every log record header has
882 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
883 * to get a sync block number. The only concern is to figure out which
884 * log record header to believe.
885 *
886 * The following algorithm uses the log record header with the largest
887 * lsn. The entire log record does not need to be valid. We only care
888 * that the header is valid.
889 *
890 * We could speed up search by using current head_blk buffer, but it is not
891 * available.
892 */
893 STATIC int
894 xlog_find_tail(
895 struct xlog *log,
896 xfs_daddr_t *head_blk,
897 xfs_daddr_t *tail_blk)
898 {
899 xlog_rec_header_t *rhead;
900 xlog_op_header_t *op_head;
901 xfs_caddr_t offset = NULL;
902 xfs_buf_t *bp;
903 int error, i, found;
904 xfs_daddr_t umount_data_blk;
905 xfs_daddr_t after_umount_blk;
906 xfs_lsn_t tail_lsn;
907 int hblks;
908
909 found = 0;
910
911 /*
912 * Find previous log record
913 */
914 if ((error = xlog_find_head(log, head_blk)))
915 return error;
916
917 bp = xlog_get_bp(log, 1);
918 if (!bp)
919 return ENOMEM;
920 if (*head_blk == 0) { /* special case */
921 error = xlog_bread(log, 0, 1, bp, &offset);
922 if (error)
923 goto done;
924
925 if (xlog_get_cycle(offset) == 0) {
926 *tail_blk = 0;
927 /* leave all other log inited values alone */
928 goto done;
929 }
930 }
931
932 /*
933 * Search backwards looking for log record header block
934 */
935 ASSERT(*head_blk < INT_MAX);
936 for (i = (int)(*head_blk) - 1; i >= 0; i--) {
937 error = xlog_bread(log, i, 1, bp, &offset);
938 if (error)
939 goto done;
940
941 if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
942 found = 1;
943 break;
944 }
945 }
946 /*
947 * If we haven't found the log record header block, start looking
948 * again from the end of the physical log. XXXmiken: There should be
949 * a check here to make sure we didn't search more than N blocks in
950 * the previous code.
951 */
952 if (!found) {
953 for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
954 error = xlog_bread(log, i, 1, bp, &offset);
955 if (error)
956 goto done;
957
958 if (*(__be32 *)offset ==
959 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
960 found = 2;
961 break;
962 }
963 }
964 }
965 if (!found) {
966 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
967 xlog_put_bp(bp);
968 ASSERT(0);
969 return XFS_ERROR(EIO);
970 }
971
972 /* find blk_no of tail of log */
973 rhead = (xlog_rec_header_t *)offset;
974 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
975
976 /*
977 * Reset log values according to the state of the log when we
978 * crashed. In the case where head_blk == 0, we bump curr_cycle
979 * one because the next write starts a new cycle rather than
980 * continuing the cycle of the last good log record. At this
981 * point we have guaranteed that all partial log records have been
982 * accounted for. Therefore, we know that the last good log record
983 * written was complete and ended exactly on the end boundary
984 * of the physical log.
985 */
986 log->l_prev_block = i;
987 log->l_curr_block = (int)*head_blk;
988 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
989 if (found == 2)
990 log->l_curr_cycle++;
991 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
992 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
993 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
994 BBTOB(log->l_curr_block));
995 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
996 BBTOB(log->l_curr_block));
997
998 /*
999 * Look for unmount record. If we find it, then we know there
1000 * was a clean unmount. Since 'i' could be the last block in
1001 * the physical log, we convert to a log block before comparing
1002 * to the head_blk.
1003 *
1004 * Save the current tail lsn to use to pass to
1005 * xlog_clear_stale_blocks() below. We won't want to clear the
1006 * unmount record if there is one, so we pass the lsn of the
1007 * unmount record rather than the block after it.
1008 */
1009 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
1010 int h_size = be32_to_cpu(rhead->h_size);
1011 int h_version = be32_to_cpu(rhead->h_version);
1012
1013 if ((h_version & XLOG_VERSION_2) &&
1014 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
1015 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
1016 if (h_size % XLOG_HEADER_CYCLE_SIZE)
1017 hblks++;
1018 } else {
1019 hblks = 1;
1020 }
1021 } else {
1022 hblks = 1;
1023 }
1024 after_umount_blk = (i + hblks + (int)
1025 BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
1026 tail_lsn = atomic64_read(&log->l_tail_lsn);
1027 if (*head_blk == after_umount_blk &&
1028 be32_to_cpu(rhead->h_num_logops) == 1) {
1029 umount_data_blk = (i + hblks) % log->l_logBBsize;
1030 error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
1031 if (error)
1032 goto done;
1033
1034 op_head = (xlog_op_header_t *)offset;
1035 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1036 /*
1037 * Set tail and last sync so that newly written
1038 * log records will point recovery to after the
1039 * current unmount record.
1040 */
1041 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1042 log->l_curr_cycle, after_umount_blk);
1043 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1044 log->l_curr_cycle, after_umount_blk);
1045 *tail_blk = after_umount_blk;
1046
1047 /*
1048 * Note that the unmount was clean. If the unmount
1049 * was not clean, we need to know this to rebuild the
1050 * superblock counters from the perag headers if we
1051 * have a filesystem using non-persistent counters.
1052 */
1053 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1054 }
1055 }
1056
1057 /*
1058 * Make sure that there are no blocks in front of the head
1059 * with the same cycle number as the head. This can happen
1060 * because we allow multiple outstanding log writes concurrently,
1061 * and the later writes might make it out before earlier ones.
1062 *
1063 * We use the lsn from before modifying it so that we'll never
1064 * overwrite the unmount record after a clean unmount.
1065 *
1066 * Do this only if we are going to recover the filesystem
1067 *
1068 * NOTE: This used to say "if (!readonly)"
1069 * However on Linux, we can & do recover a read-only filesystem.
1070 * We only skip recovery if NORECOVERY is specified on mount,
1071 * in which case we would not be here.
1072 *
1073 * But... if the -device- itself is readonly, just skip this.
1074 * We can't recover this device anyway, so it won't matter.
1075 */
1076 if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
1077 error = xlog_clear_stale_blocks(log, tail_lsn);
1078
1079 done:
1080 xlog_put_bp(bp);
1081
1082 if (error)
1083 xfs_warn(log->l_mp, "failed to locate log tail");
1084 return error;
1085 }
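
/*
 * Editorial sketch (hypothetical guard and names, not part of the kernel
 * build): how an LSN packs a cycle number and a basic-block number into
 * one 64-bit word, which is why xlog_find_tail() can pull the tail block
 * straight out of h_tail_lsn with BLOCK_LSN().
 */
#ifdef XLOG_RECOVERY_EXAMPLES
#include <assert.h>
#include <stdint.h>

static uint64_t example_assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

static void example_lsn_roundtrip(void)
{
	uint64_t lsn = example_assign_lsn(7, 1234);

	assert((uint32_t)(lsn >> 32) == 7);	/* CYCLE_LSN() */
	assert((uint32_t)lsn == 1234);		/* BLOCK_LSN() */
}
#endif /* XLOG_RECOVERY_EXAMPLES */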
1086
1087 /*
1088 * Is the log zeroed at all?
1089 *
1090 * The last binary search should be changed to perform an X block read
1091 * once X becomes small enough. You can then search linearly through
1092 * the X blocks. This will cut down on the number of reads we need to do.
1093 *
1094 * If the log is partially zeroed, this routine will pass back the blkno
1095 * of the first block with cycle number 0. It won't have a complete LR
1096 * preceding it.
1097 *
1098 * Return:
1099 * 0 => the log is completely written to
1100 * -1 => use *blk_no as the first block of the log
1101 * >0 => error has occurred
1102 */
1103 STATIC int
1104 xlog_find_zeroed(
1105 struct xlog *log,
1106 xfs_daddr_t *blk_no)
1107 {
1108 xfs_buf_t *bp;
1109 xfs_caddr_t offset;
1110 uint first_cycle, last_cycle;
1111 xfs_daddr_t new_blk, last_blk, start_blk;
1112 xfs_daddr_t num_scan_bblks;
1113 int error, log_bbnum = log->l_logBBsize;
1114
1115 *blk_no = 0;
1116
1117 /* check totally zeroed log */
1118 bp = xlog_get_bp(log, 1);
1119 if (!bp)
1120 return ENOMEM;
1121 error = xlog_bread(log, 0, 1, bp, &offset);
1122 if (error)
1123 goto bp_err;
1124
1125 first_cycle = xlog_get_cycle(offset);
1126 if (first_cycle == 0) { /* completely zeroed log */
1127 *blk_no = 0;
1128 xlog_put_bp(bp);
1129 return -1;
1130 }
1131
1132 /* check partially zeroed log */
1133 error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
1134 if (error)
1135 goto bp_err;
1136
1137 last_cycle = xlog_get_cycle(offset);
1138 if (last_cycle != 0) { /* log completely written to */
1139 xlog_put_bp(bp);
1140 return 0;
1141 } else if (first_cycle != 1) {
1142 /*
1143 * If the cycle of the last block is zero, the cycle of
1144 * the first block must be 1. If it's not, maybe we're
1145 * not looking at a log... Bail out.
1146 */
1147 xfs_warn(log->l_mp,
1148 "Log inconsistent or not a log (last==0, first!=1)");
1149 error = XFS_ERROR(EINVAL);
1150 goto bp_err;
1151 }
1152
1153 /* we have a partially zeroed log */
1154 last_blk = log_bbnum-1;
1155 if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
1156 goto bp_err;
1157
1158 /*
1159 * Validate the answer. Because there is no way to guarantee that
1160 * the entire log is made up of log records which are the same size,
1161 * we scan over the defined maximum blocks. At this point, the maximum
1162 * is not chosen to mean anything special. XXXmiken
1163 */
1164 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1165 ASSERT(num_scan_bblks <= INT_MAX);
1166
1167 if (last_blk < num_scan_bblks)
1168 num_scan_bblks = last_blk;
1169 start_blk = last_blk - num_scan_bblks;
1170
1171 /*
1172 * We search for any instances of cycle number 0 that occur before
1173 * our current estimate of the head. What we're trying to detect is
1174 * 1 ... | 0 | 1 | 0...
1175 * ^ binary search ends here
1176 */
1177 if ((error = xlog_find_verify_cycle(log, start_blk,
1178 (int)num_scan_bblks, 0, &new_blk)))
1179 goto bp_err;
1180 if (new_blk != -1)
1181 last_blk = new_blk;
1182
1183 /*
1184 * Potentially back up over a partial log record write. We don't need
1185 * to search the end of the log because we know it is zero.
1186 */
1187 if ((error = xlog_find_verify_log_record(log, start_blk,
1188 &last_blk, 0)) == -1) {
1189 error = XFS_ERROR(EIO);
1190 goto bp_err;
1191 } else if (error)
1192 goto bp_err;
1193
1194 *blk_no = last_blk;
1195 bp_err:
1196 xlog_put_bp(bp);
1197 if (error)
1198 return error;
1199 return -1;
1200 }
1201
1202 /*
1203 * These are simple subroutines used by xlog_clear_stale_blocks() below
1204 * to initialize a buffer full of empty log record headers and write
1205 * them into the log.
1206 */
1207 STATIC void
1208 xlog_add_record(
1209 struct xlog *log,
1210 xfs_caddr_t buf,
1211 int cycle,
1212 int block,
1213 int tail_cycle,
1214 int tail_block)
1215 {
1216 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1217
1218 memset(buf, 0, BBSIZE);
1219 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1220 recp->h_cycle = cpu_to_be32(cycle);
1221 recp->h_version = cpu_to_be32(
1222 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1223 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1224 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1225 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1226 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1227 }
1228
1229 STATIC int
1230 xlog_write_log_records(
1231 struct xlog *log,
1232 int cycle,
1233 int start_block,
1234 int blocks,
1235 int tail_cycle,
1236 int tail_block)
1237 {
1238 xfs_caddr_t offset;
1239 xfs_buf_t *bp;
1240 int balign, ealign;
1241 int sectbb = log->l_sectBBsize;
1242 int end_block = start_block + blocks;
1243 int bufblks;
1244 int error = 0;
1245 int i, j = 0;
1246
1247 /*
1248 * Greedily allocate a buffer big enough to handle the full
1249 * range of basic blocks to be written. If that fails, try
1250 * a smaller size. We need to be able to write at least a
1251 * log sector, or we're out of luck.
1252 */
1253 bufblks = 1 << ffs(blocks);
1254 while (bufblks > log->l_logBBsize)
1255 bufblks >>= 1;
1256 while (!(bp = xlog_get_bp(log, bufblks))) {
1257 bufblks >>= 1;
1258 if (bufblks < sectbb)
1259 return ENOMEM;
1260 }
1261
1262 /* We may need to do a read at the start to fill in part of
1263 * the buffer in the starting sector not covered by the first
1264 * write below.
1265 */
1266 balign = round_down(start_block, sectbb);
1267 if (balign != start_block) {
1268 error = xlog_bread_noalign(log, start_block, 1, bp);
1269 if (error)
1270 goto out_put_bp;
1271
1272 j = start_block - balign;
1273 }
1274
1275 for (i = start_block; i < end_block; i += bufblks) {
1276 int bcount, endcount;
1277
1278 bcount = min(bufblks, end_block - start_block);
1279 endcount = bcount - j;
1280
1281 /* We may need to do a read at the end to fill in part of
1282 * the buffer in the final sector not covered by the write.
1283 * If this is the same sector as the above read, skip it.
1284 */
1285 ealign = round_down(end_block, sectbb);
1286 if (j == 0 && (start_block + endcount > ealign)) {
1287 offset = bp->b_addr + BBTOB(ealign - start_block);
1288 error = xlog_bread_offset(log, ealign, sectbb,
1289 bp, offset);
1290 if (error)
1291 break;
1292
1293 }
1294
1295 offset = xlog_align(log, start_block, endcount, bp);
1296 for (; j < endcount; j++) {
1297 xlog_add_record(log, offset, cycle, i+j,
1298 tail_cycle, tail_block);
1299 offset += BBSIZE;
1300 }
1301 error = xlog_bwrite(log, start_block, endcount, bp);
1302 if (error)
1303 break;
1304 start_block += endcount;
1305 j = 0;
1306 }
1307
1308 out_put_bp:
1309 xlog_put_bp(bp);
1310 return error;
1311 }
1312
1313 /*
1314 * This routine is called to blow away any incomplete log writes out
1315 * in front of the log head. We do this so that we won't become confused
1316 * if we come up, write only a little bit more, and then crash again.
1317 * If we leave the partial log records out there, this situation could
1318 * cause us to think those partial writes are valid blocks since they
1319 * have the current cycle number. We get rid of them by overwriting them
1320 * with empty log records with the old cycle number rather than the
1321 * current one.
1322 *
1323 * The tail lsn is passed in rather than taken from
1324 * the log so that we will not write over the unmount record after a
1325 * clean unmount in a 512 block log. Doing so would leave the log without
1326 * any valid log records in it until a new one was written. If we crashed
1327 * during that time we would not be able to recover.
1328 */
1329 STATIC int
1330 xlog_clear_stale_blocks(
1331 struct xlog *log,
1332 xfs_lsn_t tail_lsn)
1333 {
1334 int tail_cycle, head_cycle;
1335 int tail_block, head_block;
1336 int tail_distance, max_distance;
1337 int distance;
1338 int error;
1339
1340 tail_cycle = CYCLE_LSN(tail_lsn);
1341 tail_block = BLOCK_LSN(tail_lsn);
1342 head_cycle = log->l_curr_cycle;
1343 head_block = log->l_curr_block;
1344
1345 /*
1346 * Figure out the distance between the new head of the log
1347 * and the tail. We want to write over any blocks beyond the
1348 * head that we may have written just before the crash, but
1349 * we don't want to overwrite the tail of the log.
1350 */
1351 if (head_cycle == tail_cycle) {
1352 /*
1353 * The tail is behind the head in the physical log,
1354 * so the distance from the head to the tail is the
1355 * distance from the head to the end of the log plus
1356 * the distance from the beginning of the log to the
1357 * tail.
1358 */
1359 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
1360 XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
1361 XFS_ERRLEVEL_LOW, log->l_mp);
1362 return XFS_ERROR(EFSCORRUPTED);
1363 }
1364 tail_distance = tail_block + (log->l_logBBsize - head_block);
1365 } else {
1366 /*
1367 * The head is behind the tail in the physical log,
1368 * so the distance from the head to the tail is just
1369 * the tail block minus the head block.
1370 */
1371 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
1372 XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
1373 XFS_ERRLEVEL_LOW, log->l_mp);
1374 return XFS_ERROR(EFSCORRUPTED);
1375 }
1376 tail_distance = tail_block - head_block;
1377 }
1378
1379 /*
1380 * If the head is right up against the tail, we can't clear
1381 * anything.
1382 */
1383 if (tail_distance <= 0) {
1384 ASSERT(tail_distance == 0);
1385 return 0;
1386 }
1387
1388 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1389 /*
1390 * Take the smaller of the maximum amount of outstanding I/O
1391 * we could have and the distance to the tail to clear out.
1392 * We take the smaller so that we don't overwrite the tail and
1393 * we don't waste all day writing from the head to the tail
1394 * for no reason.
1395 */
1396 max_distance = MIN(max_distance, tail_distance);
1397
1398 if ((head_block + max_distance) <= log->l_logBBsize) {
1399 /*
1400 * We can stomp all the blocks we need to without
1401 * wrapping around the end of the log. Just do it
1402 * in a single write. Use the cycle number of the
1403 * current cycle minus one so that the log will look like:
1404 * n ... | n - 1 ...
1405 */
1406 error = xlog_write_log_records(log, (head_cycle - 1),
1407 head_block, max_distance, tail_cycle,
1408 tail_block);
1409 if (error)
1410 return error;
1411 } else {
1412 /*
1413 * We need to wrap around the end of the physical log in
1414 * order to clear all the blocks. Do it in two separate
1415 * I/Os. The first write should be from the head to the
1416 * end of the physical log, and it should use the current
1417 * cycle number minus one just like above.
1418 */
1419 distance = log->l_logBBsize - head_block;
1420 error = xlog_write_log_records(log, (head_cycle - 1),
1421 head_block, distance, tail_cycle,
1422 tail_block);
1423
1424 if (error)
1425 return error;
1426
1427 /*
1428 * Now write the blocks at the start of the physical log.
1429 * This writes the remainder of the blocks we want to clear.
1430 * It uses the current cycle number since we're now on the
1431 * same cycle as the head so that we get:
1432 * n ... n ... | n - 1 ...
1433 * ^^^^^ blocks we're writing
1434 */
1435 distance = max_distance - (log->l_logBBsize - head_block);
1436 error = xlog_write_log_records(log, head_cycle, 0, distance,
1437 tail_cycle, tail_block);
1438 if (error)
1439 return error;
1440 }
1441
1442 return 0;
1443 }
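
/*
 * Editorial sketch (hypothetical, not part of the kernel build): the two
 * head/tail distance cases handled above, on a toy log. With equal
 * cycles the tail is physically behind the head, so the distance wraps
 * through the end of the log; with head_cycle == tail_cycle + 1 the head
 * has wrapped and the distance is a simple difference.
 */
#ifdef XLOG_RECOVERY_EXAMPLES
#include <assert.h>

static int example_tail_distance(int log_size, int head_cycle,
				 int head_block, int tail_cycle,
				 int tail_block)
{
	if (head_cycle == tail_cycle)		/* head ahead of tail */
		return tail_block + (log_size - head_block);
	return tail_block - head_block;		/* head wrapped past end */
}

static void example_distances(void)
{
	/* same cycle: clearing may wrap around block log_size - 1 */
	assert(example_tail_distance(100, 5, 80, 5, 20) == 40);
	/* head wrapped: clear only the gap up to the tail */
	assert(example_tail_distance(100, 6, 10, 5, 70) == 60);
}
#endif /* XLOG_RECOVERY_EXAMPLES */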
1444
1445 /******************************************************************************
1446 *
1447 * Log recover routines
1448 *
1449 ******************************************************************************
1450 */
1451
1452 STATIC xlog_recover_t *
1453 xlog_recover_find_tid(
1454 struct hlist_head *head,
1455 xlog_tid_t tid)
1456 {
1457 xlog_recover_t *trans;
1458
1459 hlist_for_each_entry(trans, head, r_list) {
1460 if (trans->r_log_tid == tid)
1461 return trans;
1462 }
1463 return NULL;
1464 }
1465
1466 STATIC void
1467 xlog_recover_new_tid(
1468 struct hlist_head *head,
1469 xlog_tid_t tid,
1470 xfs_lsn_t lsn)
1471 {
1472 xlog_recover_t *trans;
1473
1474 trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
1475 trans->r_log_tid = tid;
1476 trans->r_lsn = lsn;
1477 INIT_LIST_HEAD(&trans->r_itemq);
1478
1479 INIT_HLIST_NODE(&trans->r_list);
1480 hlist_add_head(&trans->r_list, head);
1481 }
1482
1483 STATIC void
1484 xlog_recover_add_item(
1485 struct list_head *head)
1486 {
1487 xlog_recover_item_t *item;
1488
1489 item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
1490 INIT_LIST_HEAD(&item->ri_list);
1491 list_add_tail(&item->ri_list, head);
1492 }
1493
1494 STATIC int
1495 xlog_recover_add_to_cont_trans(
1496 struct xlog *log,
1497 struct xlog_recover *trans,
1498 xfs_caddr_t dp,
1499 int len)
1500 {
1501 xlog_recover_item_t *item;
1502 xfs_caddr_t ptr, old_ptr;
1503 int old_len;
1504
1505 if (list_empty(&trans->r_itemq)) {
1506 /* finish copying rest of trans header */
1507 xlog_recover_add_item(&trans->r_itemq);
1508 ptr = (xfs_caddr_t) &trans->r_theader +
1509 sizeof(xfs_trans_header_t) - len;
1510 memcpy(ptr, dp, len); /* d, s, l */
1511 return 0;
1512 }
1513 /* take the tail entry */
1514 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1515
1516 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
1517 old_len = item->ri_buf[item->ri_cnt-1].i_len;
1518
1519 ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
1520 memcpy(&ptr[old_len], dp, len); /* d, s, l */
1521 item->ri_buf[item->ri_cnt-1].i_len += len;
1522 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
1523 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
1524 return 0;
1525 }
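
/*
 * Editorial sketch (hypothetical names, not part of the kernel build):
 * the grow-and-append step above in plain C. A continuation region is
 * simply glued onto the tail region's existing bytes, which is why
 * i_len grows by 'len' and the buffer pointer may move after the
 * reallocation.
 */
#ifdef XLOG_RECOVERY_EXAMPLES
#include <stdlib.h>
#include <string.h>

struct example_region {
	char	*buf;
	int	len;
};

static int example_append_cont(struct example_region *r,
			       const char *dp, int len)
{
	char *p = realloc(r->buf, r->len + len);

	if (!p)
		return -1;
	memcpy(p + r->len, dp, len);	/* splice new bytes onto the tail */
	r->buf = p;
	r->len += len;
	return 0;
}
#endif /* XLOG_RECOVERY_EXAMPLES */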
1526
1527 /*
1528 * The next region to add is the start of a new region. It could be
1529 * a whole region or it could be the first part of a new region. Because
1530 * of this, the assumption here is that the type and size fields of all
1531 * format structures fit into the first 32 bits of the structure.
1532 *
1533 * This works because all regions must be 32 bit aligned. Therefore, we
1534 * either have both fields or we have neither field. In the case we have
1535 * neither field, the data part of the region is zero length. We only have
1536 * a log_op_header and can throw away the header since a new one will appear
1537 * later. If we have at least 4 bytes, then we can determine how many regions
1538 * will appear in the current log item.
1539 */
1540 STATIC int
1541 xlog_recover_add_to_trans(
1542 struct xlog *log,
1543 struct xlog_recover *trans,
1544 xfs_caddr_t dp,
1545 int len)
1546 {
1547 xfs_inode_log_format_t *in_f; /* any will do */
1548 xlog_recover_item_t *item;
1549 xfs_caddr_t ptr;
1550
1551 if (!len)
1552 return 0;
1553 if (list_empty(&trans->r_itemq)) {
1554 /* we need to catch log corruptions here */
1555 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
1556 xfs_warn(log->l_mp, "%s: bad header magic number",
1557 __func__);
1558 ASSERT(0);
1559 return XFS_ERROR(EIO);
1560 }
1561 if (len == sizeof(xfs_trans_header_t))
1562 xlog_recover_add_item(&trans->r_itemq);
1563 memcpy(&trans->r_theader, dp, len); /* d, s, l */
1564 return 0;
1565 }
1566
1567 ptr = kmem_alloc(len, KM_SLEEP);
1568 memcpy(ptr, dp, len);
1569 in_f = (xfs_inode_log_format_t *)ptr;
1570
1571 /* take the tail entry */
1572 item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
1573 if (item->ri_total != 0 &&
1574 item->ri_total == item->ri_cnt) {
1575 /* tail item is in use, get a new one */
1576 xlog_recover_add_item(&trans->r_itemq);
1577 item = list_entry(trans->r_itemq.prev,
1578 xlog_recover_item_t, ri_list);
1579 }
1580
1581 if (item->ri_total == 0) { /* first region to be added */
1582 if (in_f->ilf_size == 0 ||
1583 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
1584 xfs_warn(log->l_mp,
1585 "bad number of regions (%d) in inode log format",
1586 in_f->ilf_size);
1587 ASSERT(0);
1588 return XFS_ERROR(EIO);
1589 }
1590
1591 item->ri_total = in_f->ilf_size;
1592 item->ri_buf =
1593 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
1594 KM_SLEEP);
1595 }
1596 ASSERT(item->ri_total > item->ri_cnt);
1597 /* Description region is ri_buf[0] */
1598 item->ri_buf[item->ri_cnt].i_addr = ptr;
1599 item->ri_buf[item->ri_cnt].i_len = len;
1600 item->ri_cnt++;
1601 trace_xfs_log_recover_item_add(log, trans, item, 0);
1602 return 0;
1603 }
1604
1605 /*
1606 * Sort the log items in the transaction.
1607 *
1608 * The ordering constraints are defined by the inode allocation and unlink
1609 * behaviour. The rules are:
1610 *
1611 * 1. Every item is only logged once in a given transaction. Hence it
1612 * represents the last logged state of the item. Hence ordering is
1613 * dependent on the order in which operations need to be performed so
1614 * required initial conditions are always met.
1615 *
1616 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1617 * there's nothing to replay from them so we can simply cull them
1618 * from the transaction. However, we can't do that until after we've
1619 * replayed all the other items because they may be dependent on the
1620 * cancelled buffer and replaying the cancelled buffer can remove it
1621 * from the cancelled buffer table. Hence they have to be done last.
1622 *
1623 * 3. Inode allocation buffers must be replayed before inode items that
1624 * read the buffer and replay changes into it. For filesystems using the
1625 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1626 * treated the same as inode allocation buffers as they create and
1627 * initialise the buffers directly.
1628 *
1629 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1630 * This ensures that inodes are completely flushed to the inode buffer
1631 * in a "free" state before we remove the unlinked inode list pointer.
1632 *
1633 * Hence the ordering needs to be inode allocation buffers first, inode items
1634 * second, inode unlink buffers third and cancelled buffers last.
1635 *
1636 * But there's a problem with that - we can't tell an inode allocation buffer
1637 * apart from a regular buffer, so we can't separate them. We can, however,
1638 * tell an inode unlink buffer from the others, and so we can separate them out
1639 * from all the other buffers and move them to last.
1640 *
1641 * Hence, 4 lists, in order from head to tail:
1642 * - buffer_list for all buffers except cancelled/inode unlink buffers
1643 * - item_list for all non-buffer items
1644 * - inode_buffer_list for inode unlink buffers
1645 * - cancel_list for the cancelled buffers
1646 *
1647 * Note that we add objects to the tail of the lists so that first-to-last
1648 * ordering is preserved within the lists. Adding objects to the head of the
1649 * list means when we traverse from the head we walk them in last-to-first
1650 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1651 * but for all other items there may be specific ordering that we need to
1652 * preserve.
1653 */
1654 STATIC int
1655 xlog_recover_reorder_trans(
1656 struct xlog *log,
1657 struct xlog_recover *trans,
1658 int pass)
1659 {
1660 xlog_recover_item_t *item, *n;
1661 LIST_HEAD(sort_list);
1662 LIST_HEAD(cancel_list);
1663 LIST_HEAD(buffer_list);
1664 LIST_HEAD(inode_buffer_list);
1665 LIST_HEAD(inode_list);
1666
1667 list_splice_init(&trans->r_itemq, &sort_list);
1668 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1669 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1670
1671 switch (ITEM_TYPE(item)) {
1672 case XFS_LI_ICREATE:
1673 list_move_tail(&item->ri_list, &buffer_list);
1674 break;
1675 case XFS_LI_BUF:
1676 if (buf_f->blf_flags & XFS_BLF_CANCEL) {
1677 trace_xfs_log_recover_item_reorder_head(log,
1678 trans, item, pass);
1679 list_move(&item->ri_list, &cancel_list);
1680 break;
1681 }
1682 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
1683 list_move(&item->ri_list, &inode_buffer_list);
1684 break;
1685 }
1686 list_move_tail(&item->ri_list, &buffer_list);
1687 break;
1688 case XFS_LI_INODE:
1689 case XFS_LI_DQUOT:
1690 case XFS_LI_QUOTAOFF:
1691 case XFS_LI_EFD:
1692 case XFS_LI_EFI:
1693 trace_xfs_log_recover_item_reorder_tail(log,
1694 trans, item, pass);
1695 list_move_tail(&item->ri_list, &inode_list);
1696 break;
1697 default:
1698 xfs_warn(log->l_mp,
1699 "%s: unrecognized type of log operation",
1700 __func__);
1701 ASSERT(0);
1702 return XFS_ERROR(EIO);
1703 }
1704 }
1705 ASSERT(list_empty(&sort_list));
1706 if (!list_empty(&buffer_list))
1707 list_splice(&buffer_list, &trans->r_itemq);
1708 if (!list_empty(&inode_list))
1709 list_splice_tail(&inode_list, &trans->r_itemq);
1710 if (!list_empty(&inode_buffer_list))
1711 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1712 if (!list_empty(&cancel_list))
1713 list_splice_tail(&cancel_list, &trans->r_itemq);
1714 return 0;
1715 }
1716
1717 /*
1718 * Build up the table of buf cancel records so that we don't replay
1719 * cancelled data in the second pass. For buffer records that are
1720 * not cancel records, there is nothing to do here so we just return.
1721 *
1722 * If we get a cancel record which is already in the table, this indicates
1723 * that the buffer was cancelled multiple times. In order to ensure
1724 * that during pass 2 we keep the record in the table until we reach its
1725 * last occurrence in the log, we keep a reference count in the cancel
1726 * record in the table to tell us how many times we expect to see this
1727 * record during the second pass.
1728 */
1729 STATIC int
1730 xlog_recover_buffer_pass1(
1731 struct xlog *log,
1732 struct xlog_recover_item *item)
1733 {
1734 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1735 struct list_head *bucket;
1736 struct xfs_buf_cancel *bcp;
1737
1738 /*
1739 * If this isn't a cancel buffer item, then just return.
1740 */
1741 if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
1742 trace_xfs_log_recover_buf_not_cancel(log, buf_f);
1743 return 0;
1744 }
1745
1746 /*
1747 * Insert an xfs_buf_cancel record into the hash table of them.
1748 * If there is already an identical record, bump its reference count.
1749 */
1750 bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
1751 list_for_each_entry(bcp, bucket, bc_list) {
1752 if (bcp->bc_blkno == buf_f->blf_blkno &&
1753 bcp->bc_len == buf_f->blf_len) {
1754 bcp->bc_refcount++;
1755 trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
1756 return 0;
1757 }
1758 }
1759
1760 bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
1761 bcp->bc_blkno = buf_f->blf_blkno;
1762 bcp->bc_len = buf_f->blf_len;
1763 bcp->bc_refcount = 1;
1764 list_add_tail(&bcp->bc_list, bucket);
1765
1766 trace_xfs_log_recover_buf_cancel_add(log, buf_f);
1767 return 0;
1768 }
1769
1770 /*
1771 * Check to see whether the buffer being recovered has a corresponding
1772 * entry in the buffer cancel record table. If it does, return the cancel
1773 * buffer structure to the caller.
1774 */
1775 STATIC struct xfs_buf_cancel *
1776 xlog_peek_buffer_cancelled(
1777 struct xlog *log,
1778 xfs_daddr_t blkno,
1779 uint len,
1780 ushort flags)
1781 {
1782 struct list_head *bucket;
1783 struct xfs_buf_cancel *bcp;
1784
1785 if (!log->l_buf_cancel_table) {
1786 /* empty table means no cancelled buffers in the log */
1787 ASSERT(!(flags & XFS_BLF_CANCEL));
1788 return NULL;
1789 }
1790
1791 bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
1792 list_for_each_entry(bcp, bucket, bc_list) {
1793 if (bcp->bc_blkno == blkno && bcp->bc_len == len)
1794 return bcp;
1795 }
1796
1797 /*
1798 * We didn't find a corresponding entry in the table, so return NULL
1799 * so that the buffer is NOT cancelled.
1800 */
1801 ASSERT(!(flags & XFS_BLF_CANCEL));
1802 return NULL;
1803 }
1804
1805 /*
1806 * If the buffer is being cancelled then return 1 so that it will be cancelled,
1807 * otherwise return 0. If the buffer is actually a buffer cancel item
1808 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
1809 * table and remove it from the table if this is the last reference.
1810 *
1811 * We remove the cancel record from the table when we encounter its last
1812 * occurrence in the log so that if the same buffer is re-used again after its
1813 * last cancellation we actually replay the changes made at that point.
1814 */
1815 STATIC int
1816 xlog_check_buffer_cancelled(
1817 struct xlog *log,
1818 xfs_daddr_t blkno,
1819 uint len,
1820 ushort flags)
1821 {
1822 struct xfs_buf_cancel *bcp;
1823
1824 bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
1825 if (!bcp)
1826 return 0;
1827
1828 /*
1829  * We've got a match, so return 1 so that the recovery of this buffer
1830 * is cancelled. If this buffer is actually a buffer cancel log
1831 * item, then decrement the refcount on the one in the table and
1832 * remove it if this is the last reference.
1833 */
1834 if (flags & XFS_BLF_CANCEL) {
1835 if (--bcp->bc_refcount == 0) {
1836 list_del(&bcp->bc_list);
1837 kmem_free(bcp);
1838 }
1839 }
1840 return 1;
1841 }
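
/*
 * Illustration only (not part of the original source): a minimal user-space
 * sketch of the two-pass cancel table above. Pass 1 adds a record or bumps
 * its refcount; pass 2 decrements and frees on the last reference so that a
 * later reuse of the same blocks is replayed. For brevity this models a
 * single list rather than the hashed buckets behind XLOG_BUF_CANCEL_BUCKET;
 * all struct and function names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct cancel_rec {
	long long		blkno;
	unsigned int		len;
	int			refcount;
	struct cancel_rec	*next;
};

static struct cancel_rec *table;

static void pass1_add_cancel(long long blkno, unsigned int len)
{
	struct cancel_rec *r;

	for (r = table; r; r = r->next) {
		if (r->blkno == blkno && r->len == len) {
			r->refcount++;	/* expect one more hit in pass 2 */
			return;
		}
	}
	r = malloc(sizeof(*r));
	r->blkno = blkno;
	r->len = len;
	r->refcount = 1;
	r->next = table;
	table = r;
}

/* Returns 1 if the buffer is cancelled; drops a reference for cancel items. */
static int pass2_check_cancelled(long long blkno, unsigned int len,
				 int is_cancel_item)
{
	struct cancel_rec *r, **pp;

	for (pp = &table; (r = *pp) != NULL; pp = &r->next) {
		if (r->blkno != blkno || r->len != len)
			continue;
		if (is_cancel_item && --r->refcount == 0) {
			*pp = r->next;	/* last occurrence: forget it */
			free(r);
		}
		return 1;
	}
	return 0;
}

int main(void)
{
	pass1_add_cancel(100, 8);
	pass1_add_cancel(100, 8);	/* cancelled twice in the log */

	printf("%d\n", pass2_check_cancelled(100, 8, 1)); /* 1: skip replay */
	printf("%d\n", pass2_check_cancelled(100, 8, 1)); /* 1: last ref, table emptied */
	printf("%d\n", pass2_check_cancelled(100, 8, 0)); /* 0: later reuse is replayed */
	return 0;
}
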
1842
1843 /*
1844 * Perform recovery for a buffer full of inodes. In these buffers, the only
1845 * data which should be recovered is that which corresponds to the
1846 * di_next_unlinked pointers in the on disk inode structures. The rest of the
1847 * data for the inodes is always logged through the inodes themselves rather
1848 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
1849 *
1850 * The only time when buffers full of inodes are fully recovered is when the
1851 * buffer is full of newly allocated inodes. In this case the buffer will
1852 * not be marked as an inode buffer and so will be sent to
1853 * xlog_recover_do_reg_buffer() below during recovery.
1854 */
1855 STATIC int
1856 xlog_recover_do_inode_buffer(
1857 struct xfs_mount *mp,
1858 xlog_recover_item_t *item,
1859 struct xfs_buf *bp,
1860 xfs_buf_log_format_t *buf_f)
1861 {
1862 int i;
1863 int item_index = 0;
1864 int bit = 0;
1865 int nbits = 0;
1866 int reg_buf_offset = 0;
1867 int reg_buf_bytes = 0;
1868 int next_unlinked_offset;
1869 int inodes_per_buf;
1870 xfs_agino_t *logged_nextp;
1871 xfs_agino_t *buffer_nextp;
1872
1873 trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
1874
1875 /*
1876 * Post recovery validation only works properly on CRC enabled
1877 * filesystems.
1878 */
1879 if (xfs_sb_version_hascrc(&mp->m_sb))
1880 bp->b_ops = &xfs_inode_buf_ops;
1881
1882 inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
1883 for (i = 0; i < inodes_per_buf; i++) {
1884 next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
1885 offsetof(xfs_dinode_t, di_next_unlinked);
1886
1887 while (next_unlinked_offset >=
1888 (reg_buf_offset + reg_buf_bytes)) {
1889 /*
1890 * The next di_next_unlinked field is beyond
1891 * the current logged region. Find the next
1892 * logged region that contains or is beyond
1893 * the current di_next_unlinked field.
1894 */
1895 bit += nbits;
1896 bit = xfs_next_bit(buf_f->blf_data_map,
1897 buf_f->blf_map_size, bit);
1898
1899 /*
1900 * If there are no more logged regions in the
1901 * buffer, then we're done.
1902 */
1903 if (bit == -1)
1904 return 0;
1905
1906 nbits = xfs_contig_bits(buf_f->blf_data_map,
1907 buf_f->blf_map_size, bit);
1908 ASSERT(nbits > 0);
1909 reg_buf_offset = bit << XFS_BLF_SHIFT;
1910 reg_buf_bytes = nbits << XFS_BLF_SHIFT;
1911 item_index++;
1912 }
1913
1914 /*
1915 * If the current logged region starts after the current
1916 * di_next_unlinked field, then move on to the next
1917 * di_next_unlinked field.
1918 */
1919 if (next_unlinked_offset < reg_buf_offset)
1920 continue;
1921
1922 ASSERT(item->ri_buf[item_index].i_addr != NULL);
1923 ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
1924 ASSERT((reg_buf_offset + reg_buf_bytes) <=
1925 BBTOB(bp->b_io_length));
1926
1927 /*
1928 * The current logged region contains a copy of the
1929 * current di_next_unlinked field. Extract its value
1930 * and copy it to the buffer copy.
1931 */
1932 logged_nextp = item->ri_buf[item_index].i_addr +
1933 next_unlinked_offset - reg_buf_offset;
1934 if (unlikely(*logged_nextp == 0)) {
1935 xfs_alert(mp,
1936 "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
1937 "Trying to replay bad (0) inode di_next_unlinked field.",
1938 item, bp);
1939 XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
1940 XFS_ERRLEVEL_LOW, mp);
1941 return XFS_ERROR(EFSCORRUPTED);
1942 }
1943
1944 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
1945 next_unlinked_offset);
1946 *buffer_nextp = *logged_nextp;
1947
1948 /*
1949 * If necessary, recalculate the CRC in the on-disk inode. We
1950 * have to leave the inode in a consistent state for whoever
1951 * reads it next....
1952 */
1953 xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
1954 xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
1955
1956 }
1957
1958 return 0;
1959 }
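
/*
 * Illustration only (not part of the original source): a user-space sketch
 * of how the blf_data_map walk above turns bit runs into byte ranges. Each
 * map bit covers one XFS_BLF_CHUNK (128 bytes, hence the shift by
 * XFS_BLF_SHIFT == 7). next_bit()/contig_bits() are simplified stand-ins
 * for the kernel's xfs_next_bit()/xfs_contig_bits().
 */
#include <stdio.h>

#define BLF_SHIFT	7	/* log2 of the 128 byte chunk size */

static int next_bit(unsigned int *map, int size_bits, int start)
{
	int i;

	for (i = start; i < size_bits; i++)
		if (map[i / 32] & (1u << (i % 32)))
			return i;
	return -1;
}

static int contig_bits(unsigned int *map, int size_bits, int start)
{
	int i = start;

	while (i < size_bits && (map[i / 32] & (1u << (i % 32))))
		i++;
	return i - start;
}

int main(void)
{
	/* bits 0-1 and 5-7 dirty: two logged regions */
	unsigned int map[1] = { 0x000000E3 };
	int bit = 0, nbits;

	while ((bit = next_bit(map, 32, bit)) != -1) {
		nbits = contig_bits(map, 32, bit);
		printf("region: offset %d, %d bytes\n",
		       bit << BLF_SHIFT, nbits << BLF_SHIFT);
		bit += nbits;
	}
	return 0;
}
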
1960
1961 /*
1962 * V5 filesystems know the age of the buffer on disk being recovered. We can
1963 * have newer objects on disk than we are replaying, and so for these cases we
1964 * don't want to replay the current change as that will make the buffer contents
1965 * temporarily invalid on disk.
1966 *
1967 * The magic number might not match the buffer type we are going to recover
1968 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
1969  * extract the LSN of the existing object in the buffer based on its current
1970  * magic number.  If we don't recognise the magic number in the buffer, then
1971  * return an LSN of -1 so that the caller knows it was an unrecognised block and
1972 * so can recover the buffer.
1973 *
1974 * Note: we cannot rely solely on magic number matches to determine that the
1975 * buffer has a valid LSN - we also need to verify that it belongs to this
1976 * filesystem, so we need to extract the object's LSN and compare it to that
1977 * which we read from the superblock. If the UUIDs don't match, then we've got a
1978 * stale metadata block from an old filesystem instance that we need to recover
1979 * over the top of.
1980 */
1981 static xfs_lsn_t
1982 xlog_recover_get_buf_lsn(
1983 struct xfs_mount *mp,
1984 struct xfs_buf *bp)
1985 {
1986 __uint32_t magic32;
1987 __uint16_t magic16;
1988 __uint16_t magicda;
1989 void *blk = bp->b_addr;
1990 uuid_t *uuid;
1991 xfs_lsn_t lsn = -1;
1992
1993 /* v4 filesystems always recover immediately */
1994 if (!xfs_sb_version_hascrc(&mp->m_sb))
1995 goto recover_immediately;
1996
1997 magic32 = be32_to_cpu(*(__be32 *)blk);
1998 switch (magic32) {
1999 case XFS_ABTB_CRC_MAGIC:
2000 case XFS_ABTC_CRC_MAGIC:
2001 case XFS_ABTB_MAGIC:
2002 case XFS_ABTC_MAGIC:
2003 case XFS_IBT_CRC_MAGIC:
2004 case XFS_IBT_MAGIC: {
2005 struct xfs_btree_block *btb = blk;
2006
2007 lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
2008 uuid = &btb->bb_u.s.bb_uuid;
2009 break;
2010 }
2011 case XFS_BMAP_CRC_MAGIC:
2012 case XFS_BMAP_MAGIC: {
2013 struct xfs_btree_block *btb = blk;
2014
2015 lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
2016 uuid = &btb->bb_u.l.bb_uuid;
2017 break;
2018 }
2019 case XFS_AGF_MAGIC:
2020 lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
2021 uuid = &((struct xfs_agf *)blk)->agf_uuid;
2022 break;
2023 case XFS_AGFL_MAGIC:
2024 lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
2025 uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
2026 break;
2027 case XFS_AGI_MAGIC:
2028 lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
2029 uuid = &((struct xfs_agi *)blk)->agi_uuid;
2030 break;
2031 case XFS_SYMLINK_MAGIC:
2032 lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
2033 uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
2034 break;
2035 case XFS_DIR3_BLOCK_MAGIC:
2036 case XFS_DIR3_DATA_MAGIC:
2037 case XFS_DIR3_FREE_MAGIC:
2038 lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
2039 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
2040 break;
2041 case XFS_ATTR3_RMT_MAGIC:
2042 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
2043 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
2044 break;
2045 case XFS_SB_MAGIC:
2046 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
2047 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
2048 break;
2049 default:
2050 break;
2051 }
2052
2053 if (lsn != (xfs_lsn_t)-1) {
2054 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2055 goto recover_immediately;
2056 return lsn;
2057 }
2058
2059 magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
2060 switch (magicda) {
2061 case XFS_DIR3_LEAF1_MAGIC:
2062 case XFS_DIR3_LEAFN_MAGIC:
2063 case XFS_DA3_NODE_MAGIC:
2064 lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
2065 uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
2066 break;
2067 default:
2068 break;
2069 }
2070
2071 if (lsn != (xfs_lsn_t)-1) {
2072 if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
2073 goto recover_immediately;
2074 return lsn;
2075 }
2076
2077 /*
2078 * We do individual object checks on dquot and inode buffers as they
2079 * have their own individual LSN records. Also, we could have a stale
2080 * buffer here, so we have to at least recognise these buffer types.
2081 *
2082  * A noted complexity here is inode unlinked list processing - it logs
2083 * the inode directly in the buffer, but we don't know which inodes have
2084 * been modified, and there is no global buffer LSN. Hence we need to
2085 * recover all inode buffer types immediately. This problem will be
2086 * fixed by logical logging of the unlinked list modifications.
2087 */
2088 magic16 = be16_to_cpu(*(__be16 *)blk);
2089 switch (magic16) {
2090 case XFS_DQUOT_MAGIC:
2091 case XFS_DINODE_MAGIC:
2092 goto recover_immediately;
2093 default:
2094 break;
2095 }
2096
2097 /* unknown buffer contents, recover immediately */
2098
2099 recover_immediately:
2100 return (xfs_lsn_t)-1;
2101
2102 }
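
/*
 * Illustration only (not part of the original source): how the LSN returned
 * above gates replay. An xfs_lsn_t packs a 32-bit cycle in the high word and
 * a 32-bit block in the low word; XFS_LSN_CMP orders by cycle, then block.
 * A buffer is replayed only when its on-disk LSN is strictly older than the
 * transaction being recovered, or when the contents are unrecognised and the
 * LSN comes back as -1. The helper names here are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

typedef int64_t lsn_t;

static lsn_t make_lsn(uint32_t cycle, uint32_t block)
{
	return ((lsn_t)cycle << 32) | block;
}

static int lsn_cmp(lsn_t a, lsn_t b)
{
	uint32_t ca = (uint64_t)a >> 32, cb = (uint64_t)b >> 32;

	if (ca != cb)
		return ca < cb ? -1 : 1;
	return (uint32_t)a < (uint32_t)b ? -1 :
	       (uint32_t)a > (uint32_t)b ? 1 : 0;
}

static int should_replay(lsn_t buf_lsn, lsn_t trans_lsn)
{
	if (buf_lsn == -1)	/* unrecognised contents: recover immediately */
		return 1;
	return lsn_cmp(buf_lsn, trans_lsn) < 0;
}

int main(void)
{
	lsn_t trans = make_lsn(5, 1000);

	printf("%d\n", should_replay(make_lsn(5, 500), trans));  /* 1: older */
	printf("%d\n", should_replay(make_lsn(5, 1000), trans)); /* 0: same or newer */
	printf("%d\n", should_replay(make_lsn(6, 0), trans));    /* 0: newer cycle */
	printf("%d\n", should_replay((lsn_t)-1, trans));         /* 1: unknown block */
	return 0;
}
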
2103
2104 /*
2105 * Validate the recovered buffer is of the correct type and attach the
2106  * appropriate buffer operations to it for writeback. Magic numbers are in a
2107 * few places:
2108 * the first 16 bits of the buffer (inode buffer, dquot buffer),
2109 * the first 32 bits of the buffer (most blocks),
2110 * inside a struct xfs_da_blkinfo at the start of the buffer.
2111 */
2112 static void
2113 xlog_recover_validate_buf_type(
2114 struct xfs_mount *mp,
2115 struct xfs_buf *bp,
2116 xfs_buf_log_format_t *buf_f)
2117 {
2118 struct xfs_da_blkinfo *info = bp->b_addr;
2119 __uint32_t magic32;
2120 __uint16_t magic16;
2121 __uint16_t magicda;
2122
2123 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
2124 magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
2125 magicda = be16_to_cpu(info->magic);
2126 switch (xfs_blft_from_flags(buf_f)) {
2127 case XFS_BLFT_BTREE_BUF:
2128 switch (magic32) {
2129 case XFS_ABTB_CRC_MAGIC:
2130 case XFS_ABTC_CRC_MAGIC:
2131 case XFS_ABTB_MAGIC:
2132 case XFS_ABTC_MAGIC:
2133 bp->b_ops = &xfs_allocbt_buf_ops;
2134 break;
2135 case XFS_IBT_CRC_MAGIC:
2136 case XFS_IBT_MAGIC:
2137 bp->b_ops = &xfs_inobt_buf_ops;
2138 break;
2139 case XFS_BMAP_CRC_MAGIC:
2140 case XFS_BMAP_MAGIC:
2141 bp->b_ops = &xfs_bmbt_buf_ops;
2142 break;
2143 default:
2144 xfs_warn(mp, "Bad btree block magic!");
2145 ASSERT(0);
2146 break;
2147 }
2148 break;
2149 case XFS_BLFT_AGF_BUF:
2150 if (magic32 != XFS_AGF_MAGIC) {
2151 xfs_warn(mp, "Bad AGF block magic!");
2152 ASSERT(0);
2153 break;
2154 }
2155 bp->b_ops = &xfs_agf_buf_ops;
2156 break;
2157 case XFS_BLFT_AGFL_BUF:
2158 if (!xfs_sb_version_hascrc(&mp->m_sb))
2159 break;
2160 if (magic32 != XFS_AGFL_MAGIC) {
2161 xfs_warn(mp, "Bad AGFL block magic!");
2162 ASSERT(0);
2163 break;
2164 }
2165 bp->b_ops = &xfs_agfl_buf_ops;
2166 break;
2167 case XFS_BLFT_AGI_BUF:
2168 if (magic32 != XFS_AGI_MAGIC) {
2169 xfs_warn(mp, "Bad AGI block magic!");
2170 ASSERT(0);
2171 break;
2172 }
2173 bp->b_ops = &xfs_agi_buf_ops;
2174 break;
2175 case XFS_BLFT_UDQUOT_BUF:
2176 case XFS_BLFT_PDQUOT_BUF:
2177 case XFS_BLFT_GDQUOT_BUF:
2178 #ifdef CONFIG_XFS_QUOTA
2179 if (magic16 != XFS_DQUOT_MAGIC) {
2180 xfs_warn(mp, "Bad DQUOT block magic!");
2181 ASSERT(0);
2182 break;
2183 }
2184 bp->b_ops = &xfs_dquot_buf_ops;
2185 #else
2186 xfs_alert(mp,
2187 "Trying to recover dquots without QUOTA support built in!");
2188 ASSERT(0);
2189 #endif
2190 break;
2191 case XFS_BLFT_DINO_BUF:
2192 /*
2193 * we get here with inode allocation buffers, not buffers that
2194 * track unlinked list changes.
2195 */
2196 if (magic16 != XFS_DINODE_MAGIC) {
2197 xfs_warn(mp, "Bad INODE block magic!");
2198 ASSERT(0);
2199 break;
2200 }
2201 bp->b_ops = &xfs_inode_buf_ops;
2202 break;
2203 case XFS_BLFT_SYMLINK_BUF:
2204 if (magic32 != XFS_SYMLINK_MAGIC) {
2205 xfs_warn(mp, "Bad symlink block magic!");
2206 ASSERT(0);
2207 break;
2208 }
2209 bp->b_ops = &xfs_symlink_buf_ops;
2210 break;
2211 case XFS_BLFT_DIR_BLOCK_BUF:
2212 if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
2213 magic32 != XFS_DIR3_BLOCK_MAGIC) {
2214 xfs_warn(mp, "Bad dir block magic!");
2215 ASSERT(0);
2216 break;
2217 }
2218 bp->b_ops = &xfs_dir3_block_buf_ops;
2219 break;
2220 case XFS_BLFT_DIR_DATA_BUF:
2221 if (magic32 != XFS_DIR2_DATA_MAGIC &&
2222 magic32 != XFS_DIR3_DATA_MAGIC) {
2223 xfs_warn(mp, "Bad dir data magic!");
2224 ASSERT(0);
2225 break;
2226 }
2227 bp->b_ops = &xfs_dir3_data_buf_ops;
2228 break;
2229 case XFS_BLFT_DIR_FREE_BUF:
2230 if (magic32 != XFS_DIR2_FREE_MAGIC &&
2231 magic32 != XFS_DIR3_FREE_MAGIC) {
2232 xfs_warn(mp, "Bad dir3 free magic!");
2233 ASSERT(0);
2234 break;
2235 }
2236 bp->b_ops = &xfs_dir3_free_buf_ops;
2237 break;
2238 case XFS_BLFT_DIR_LEAF1_BUF:
2239 if (magicda != XFS_DIR2_LEAF1_MAGIC &&
2240 magicda != XFS_DIR3_LEAF1_MAGIC) {
2241 xfs_warn(mp, "Bad dir leaf1 magic!");
2242 ASSERT(0);
2243 break;
2244 }
2245 bp->b_ops = &xfs_dir3_leaf1_buf_ops;
2246 break;
2247 case XFS_BLFT_DIR_LEAFN_BUF:
2248 if (magicda != XFS_DIR2_LEAFN_MAGIC &&
2249 magicda != XFS_DIR3_LEAFN_MAGIC) {
2250 xfs_warn(mp, "Bad dir leafn magic!");
2251 ASSERT(0);
2252 break;
2253 }
2254 bp->b_ops = &xfs_dir3_leafn_buf_ops;
2255 break;
2256 case XFS_BLFT_DA_NODE_BUF:
2257 if (magicda != XFS_DA_NODE_MAGIC &&
2258 magicda != XFS_DA3_NODE_MAGIC) {
2259 xfs_warn(mp, "Bad da node magic!");
2260 ASSERT(0);
2261 break;
2262 }
2263 bp->b_ops = &xfs_da3_node_buf_ops;
2264 break;
2265 case XFS_BLFT_ATTR_LEAF_BUF:
2266 if (magicda != XFS_ATTR_LEAF_MAGIC &&
2267 magicda != XFS_ATTR3_LEAF_MAGIC) {
2268 xfs_warn(mp, "Bad attr leaf magic!");
2269 ASSERT(0);
2270 break;
2271 }
2272 bp->b_ops = &xfs_attr3_leaf_buf_ops;
2273 break;
2274 case XFS_BLFT_ATTR_RMT_BUF:
2275 if (!xfs_sb_version_hascrc(&mp->m_sb))
2276 break;
2277 if (magic32 != XFS_ATTR3_RMT_MAGIC) {
2278 xfs_warn(mp, "Bad attr remote magic!");
2279 ASSERT(0);
2280 break;
2281 }
2282 bp->b_ops = &xfs_attr3_rmt_buf_ops;
2283 break;
2284 case XFS_BLFT_SB_BUF:
2285 if (magic32 != XFS_SB_MAGIC) {
2286 xfs_warn(mp, "Bad SB block magic!");
2287 ASSERT(0);
2288 break;
2289 }
2290 bp->b_ops = &xfs_sb_buf_ops;
2291 break;
2292 default:
2293 xfs_warn(mp, "Unknown buffer type %d!",
2294 xfs_blft_from_flags(buf_f));
2295 break;
2296 }
2297 }
2298
2299 /*
2300 * Perform a 'normal' buffer recovery. Each logged region of the
2301 * buffer should be copied over the corresponding region in the
2302 * given buffer. The bitmap in the buf log format structure indicates
2303 * where to place the logged data.
2304 */
2305 STATIC void
2306 xlog_recover_do_reg_buffer(
2307 struct xfs_mount *mp,
2308 xlog_recover_item_t *item,
2309 struct xfs_buf *bp,
2310 xfs_buf_log_format_t *buf_f)
2311 {
2312 int i;
2313 int bit;
2314 int nbits;
2315 int error;
2316
2317 trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
2318
2319 bit = 0;
2320 i = 1; /* 0 is the buf format structure */
2321 while (1) {
2322 bit = xfs_next_bit(buf_f->blf_data_map,
2323 buf_f->blf_map_size, bit);
2324 if (bit == -1)
2325 break;
2326 nbits = xfs_contig_bits(buf_f->blf_data_map,
2327 buf_f->blf_map_size, bit);
2328 ASSERT(nbits > 0);
2329 ASSERT(item->ri_buf[i].i_addr != NULL);
2330 ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
2331 ASSERT(BBTOB(bp->b_io_length) >=
2332 ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
2333
2334 /*
2335 * The dirty regions logged in the buffer, even though
2336 * contiguous, may span multiple chunks. This is because the
2337 * dirty region may span a physical page boundary in a buffer
2338 * and hence be split into two separate vectors for writing into
2339 * the log. Hence we need to trim nbits back to the length of
2340 * the current region being copied out of the log.
2341 */
2342 if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
2343 nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
2344
2345 /*
2346 * Do a sanity check if this is a dquot buffer. Just checking
2347  * the first dquot in the buffer should do. XXX: this is
2348 * probably a good thing to do for other buf types also.
2349 */
2350 error = 0;
2351 if (buf_f->blf_flags &
2352 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2353 if (item->ri_buf[i].i_addr == NULL) {
2354 xfs_alert(mp,
2355 "XFS: NULL dquot in %s.", __func__);
2356 goto next;
2357 }
2358 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
2359 xfs_alert(mp,
2360 "XFS: dquot too small (%d) in %s.",
2361 item->ri_buf[i].i_len, __func__);
2362 goto next;
2363 }
2364 error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr,
2365 -1, 0, XFS_QMOPT_DOWARN,
2366 "dquot_buf_recover");
2367 if (error)
2368 goto next;
2369 }
2370
2371 memcpy(xfs_buf_offset(bp,
2372 (uint)bit << XFS_BLF_SHIFT), /* dest */
2373 item->ri_buf[i].i_addr, /* source */
2374 nbits<<XFS_BLF_SHIFT); /* length */
2375 next:
2376 i++;
2377 bit += nbits;
2378 }
2379
2380 /* Shouldn't be any more regions */
2381 ASSERT(i == item->ri_total);
2382
2383 /*
2384 * We can only do post recovery validation on items on CRC enabled
2385  * filesystems as we need to know when the buffer was written to be able
2386  * to determine if we should have replayed the item. If we replay old
2387  * metadata over a newer buffer, then it will enter a temporarily
2388  * inconsistent state resulting in verification failures. Hence for now
2389  * just avoid the verification stage for non-CRC filesystems.
2390 */
2391 if (xfs_sb_version_hascrc(&mp->m_sb))
2392 xlog_recover_validate_buf_type(mp, bp, buf_f);
2393 }
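
/*
 * Illustration only (not part of the original source): the nbits trim above
 * in miniature. A contiguous dirty run in the bitmap may have been split
 * into two log vectors at a page boundary, so each copy must be clamped to
 * the current vector's length and the remainder picked up on the next
 * iteration. The sizes used here are hypothetical.
 */
#include <stdio.h>

#define CHUNK	128

int main(void)
{
	int run_chunks = 6;		/* one contiguous run in the bitmap */
	int vec_len[] = { 512, 256 };	/* the run arrived as two vectors */
	int bit = 0, vec = 0;

	while (run_chunks > 0) {
		int nbits = run_chunks;

		if (vec_len[vec] < nbits * CHUNK)
			nbits = vec_len[vec] / CHUNK;	/* trim to vector */
		printf("copy %d bytes at offset %d from vector %d\n",
		       nbits * CHUNK, bit * CHUNK, vec);
		bit += nbits;
		run_chunks -= nbits;
		vec++;
	}
	return 0;
}
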
2394
2395 /*
2396 * Do some primitive error checking on ondisk dquot data structures.
2397 */
2398 int
2399 xfs_qm_dqcheck(
2400 struct xfs_mount *mp,
2401 xfs_disk_dquot_t *ddq,
2402 xfs_dqid_t id,
2403 uint type, /* used only when IO_dorepair is true */
2404 uint flags,
2405 char *str)
2406 {
2407 xfs_dqblk_t *d = (xfs_dqblk_t *)ddq;
2408 int errs = 0;
2409
2410 /*
2411 * We can encounter an uninitialized dquot buffer for 2 reasons:
2412 * 1. If we crash while deleting the quotainode(s), and those blks got
2413 * used for user data. This is because we take the path of regular
2414 * file deletion; however, the size field of quotainodes is never
2415 * updated, so all the tricks that we play in itruncate_finish
2416 * don't quite matter.
2417 *
2418  * 2. We don't replay the quota buffers when there's a quotaoff logitem.
2419 * But the allocation will be replayed so we'll end up with an
2420 * uninitialized quota block.
2421 *
2422 * This is all fine; things are still consistent, and we haven't lost
2423 * any quota information. Just don't complain about bad dquot blks.
2424 */
2425 if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
2426 if (flags & XFS_QMOPT_DOWARN)
2427 xfs_alert(mp,
2428 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
2429 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC);
2430 errs++;
2431 }
2432 if (ddq->d_version != XFS_DQUOT_VERSION) {
2433 if (flags & XFS_QMOPT_DOWARN)
2434 xfs_alert(mp,
2435 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x",
2436 str, id, ddq->d_version, XFS_DQUOT_VERSION);
2437 errs++;
2438 }
2439
2440 if (ddq->d_flags != XFS_DQ_USER &&
2441 ddq->d_flags != XFS_DQ_PROJ &&
2442 ddq->d_flags != XFS_DQ_GROUP) {
2443 if (flags & XFS_QMOPT_DOWARN)
2444 xfs_alert(mp,
2445 "%s : XFS dquot ID 0x%x, unknown flags 0x%x",
2446 str, id, ddq->d_flags);
2447 errs++;
2448 }
2449
2450 if (id != -1 && id != be32_to_cpu(ddq->d_id)) {
2451 if (flags & XFS_QMOPT_DOWARN)
2452 xfs_alert(mp,
2453 "%s : ondisk-dquot 0x%p, ID mismatch: "
2454 "0x%x expected, found id 0x%x",
2455 str, ddq, id, be32_to_cpu(ddq->d_id));
2456 errs++;
2457 }
2458
2459 if (!errs && ddq->d_id) {
2460 if (ddq->d_blk_softlimit &&
2461 be64_to_cpu(ddq->d_bcount) >
2462 be64_to_cpu(ddq->d_blk_softlimit)) {
2463 if (!ddq->d_btimer) {
2464 if (flags & XFS_QMOPT_DOWARN)
2465 xfs_alert(mp,
2466 "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED",
2467 str, (int)be32_to_cpu(ddq->d_id), ddq);
2468 errs++;
2469 }
2470 }
2471 if (ddq->d_ino_softlimit &&
2472 be64_to_cpu(ddq->d_icount) >
2473 be64_to_cpu(ddq->d_ino_softlimit)) {
2474 if (!ddq->d_itimer) {
2475 if (flags & XFS_QMOPT_DOWARN)
2476 xfs_alert(mp,
2477 "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED",
2478 str, (int)be32_to_cpu(ddq->d_id), ddq);
2479 errs++;
2480 }
2481 }
2482 if (ddq->d_rtb_softlimit &&
2483 be64_to_cpu(ddq->d_rtbcount) >
2484 be64_to_cpu(ddq->d_rtb_softlimit)) {
2485 if (!ddq->d_rtbtimer) {
2486 if (flags & XFS_QMOPT_DOWARN)
2487 xfs_alert(mp,
2488 "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED",
2489 str, (int)be32_to_cpu(ddq->d_id), ddq);
2490 errs++;
2491 }
2492 }
2493 }
2494
2495 if (!errs || !(flags & XFS_QMOPT_DQREPAIR))
2496 return errs;
2497
2498 if (flags & XFS_QMOPT_DOWARN)
2499 xfs_notice(mp, "Re-initializing dquot ID 0x%x", id);
2500
2501 /*
2502 * Typically, a repair is only requested by quotacheck.
2503 */
2504 ASSERT(id != -1);
2505 ASSERT(flags & XFS_QMOPT_DQREPAIR);
2506 memset(d, 0, sizeof(xfs_dqblk_t));
2507
2508 d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
2509 d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
2510 d->dd_diskdq.d_flags = type;
2511 d->dd_diskdq.d_id = cpu_to_be32(id);
2512
2513 if (xfs_sb_version_hascrc(&mp->m_sb)) {
2514 uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
2515 xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
2516 XFS_DQUOT_CRC_OFF);
2517 }
2518
2519 return errs;
2520 }
2521
2522 /*
2523 * Perform a dquot buffer recovery.
2524 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
2525 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
2526 * Else, treat it as a regular buffer and do recovery.
2527 */
2528 STATIC void
2529 xlog_recover_do_dquot_buffer(
2530 struct xfs_mount *mp,
2531 struct xlog *log,
2532 struct xlog_recover_item *item,
2533 struct xfs_buf *bp,
2534 struct xfs_buf_log_format *buf_f)
2535 {
2536 uint type;
2537
2538 trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
2539
2540 /*
2541 * Filesystems are required to send in quota flags at mount time.
2542 */
2543 if (mp->m_qflags == 0) {
2544 return;
2545 }
2546
2547 type = 0;
2548 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
2549 type |= XFS_DQ_USER;
2550 if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
2551 type |= XFS_DQ_PROJ;
2552 if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
2553 type |= XFS_DQ_GROUP;
2554 /*
2555  * This type of quota was turned off, so ignore this buffer
2556 */
2557 if (log->l_quotaoffs_flag & type)
2558 return;
2559
2560 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2561 }
2562
2563 /*
2564 * This routine replays a modification made to a buffer at runtime.
2565 * There are actually two types of buffer, regular and inode, which
2566 * are handled differently. Inode buffers are handled differently
2567 * in that we only recover a specific set of data from them, namely
2568 * the inode di_next_unlinked fields. This is because all other inode
2569 * data is actually logged via inode records and any data we replay
2570 * here which overlaps that may be stale.
2571 *
2572 * When meta-data buffers are freed at run time we log a buffer item
2573 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
2574 * of the buffer in the log should not be replayed at recovery time.
2575 * This is so that if the blocks covered by the buffer are reused for
2576 * file data before we crash we don't end up replaying old, freed
2577 * meta-data into a user's file.
2578 *
2579 * To handle the cancellation of buffer log items, we make two passes
2580 * over the log during recovery. During the first we build a table of
2581 * those buffers which have been cancelled, and during the second we
2582 * only replay those buffers which do not have corresponding cancel
2583 * records in the table. See xlog_recover_buffer_pass[1,2] above
2584 * for more details on the implementation of the table of cancel records.
2585 */
2586 STATIC int
2587 xlog_recover_buffer_pass2(
2588 struct xlog *log,
2589 struct list_head *buffer_list,
2590 struct xlog_recover_item *item,
2591 xfs_lsn_t current_lsn)
2592 {
2593 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
2594 xfs_mount_t *mp = log->l_mp;
2595 xfs_buf_t *bp;
2596 int error;
2597 uint buf_flags;
2598 xfs_lsn_t lsn;
2599
2600 /*
2601  * In this pass we only want to recover the buffers which have
2602 * not been cancelled and are not cancellation buffers themselves.
2603 */
2604 if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
2605 buf_f->blf_len, buf_f->blf_flags)) {
2606 trace_xfs_log_recover_buf_cancel(log, buf_f);
2607 return 0;
2608 }
2609
2610 trace_xfs_log_recover_buf_recover(log, buf_f);
2611
2612 buf_flags = 0;
2613 if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
2614 buf_flags |= XBF_UNMAPPED;
2615
2616 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
2617 buf_flags, NULL);
2618 if (!bp)
2619 return XFS_ERROR(ENOMEM);
2620 error = bp->b_error;
2621 if (error) {
2622 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
2623 goto out_release;
2624 }
2625
2626 /*
2627 * recover the buffer only if we get an LSN from it and it's less than
2628 * the lsn of the transaction we are replaying.
2629 */
2630 lsn = xlog_recover_get_buf_lsn(mp, bp);
2631 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0)
2632 goto out_release;
2633
2634 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
2635 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
2636 } else if (buf_f->blf_flags &
2637 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
2638 xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
2639 } else {
2640 xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
2641 }
2642 if (error)
2643 goto out_release;
2644
2645 /*
2646 * Perform delayed write on the buffer. Asynchronous writes will be
2647 * slower when taking into account all the buffers to be flushed.
2648 *
2649 * Also make sure that only inode buffers with good sizes stay in
2650 * the buffer cache. The kernel moves inodes in buffers of 1 block
2651 * or XFS_INODE_CLUSTER_SIZE bytes, whichever is bigger. The inode
2652 * buffers in the log can be a different size if the log was generated
2653 * by an older kernel using unclustered inode buffers or a newer kernel
2654  * running with a different inode cluster size. Regardless, if
2655 * the inode buffer size isn't MAX(blocksize, XFS_INODE_CLUSTER_SIZE)
2656 * for *our* value of XFS_INODE_CLUSTER_SIZE, then we need to keep
2657 * the buffer out of the buffer cache so that the buffer won't
2658 * overlap with future reads of those inodes.
2659 */
2660 if (XFS_DINODE_MAGIC ==
2661 be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
2662 (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
2663 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) {
2664 xfs_buf_stale(bp);
2665 error = xfs_bwrite(bp);
2666 } else {
2667 ASSERT(bp->b_target->bt_mount == mp);
2668 bp->b_iodone = xlog_recover_iodone;
2669 xfs_buf_delwri_queue(bp, buffer_list);
2670 }
2671
2672 out_release:
2673 xfs_buf_relse(bp);
2674 return error;
2675 }
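
/*
 * Illustration only (not part of the original source): the size check from
 * the comment above, in isolation. An inode buffer replayed from the log is
 * only kept in the buffer cache when its size matches MAX(blocksize, inode
 * cluster size) for the running kernel; otherwise it is written immediately
 * and marked stale so it cannot overlap later reads of those inodes. The
 * sizes used here are hypothetical.
 */
#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int blocksize = 4096;
	unsigned int cluster_size = 8192;	/* our inode cluster size */
	unsigned int buf_sizes[] = { 8192, 4096 };
	int i;

	for (i = 0; i < 2; i++) {
		if (buf_sizes[i] != MAX(blocksize, cluster_size))
			printf("%u bytes: write now and mark stale\n",
			       buf_sizes[i]);
		else
			printf("%u bytes: delwri queue, keep cached\n",
			       buf_sizes[i]);
	}
	return 0;
}
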
2676
2677 /*
2678 * Inode fork owner changes
2679 *
2680 * If we have been told that we have to reparent the inode fork, it's because an
2681 * extent swap operation on a CRC enabled filesystem has been done and we are
2682 * replaying it. We need to walk the BMBT of the appropriate fork and change the
2683 * owners of it.
2684 *
2685 * The complexity here is that we don't have an inode context to work with, so
2686 * after we've replayed the inode we need to instantiate one. This is where the
2687 * fun begins.
2688 *
2689 * We are in the middle of log recovery, so we can't run transactions. That
2690 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
2691 * that will result in the corresponding iput() running the inode through
2692 * xfs_inactive(). If we've just replayed an inode core that changes the link
2693 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
2694 * transactions (bad!).
2695 *
2696 * So, to avoid this, we instantiate an inode directly from the inode core we've
2697 * just recovered. We have the buffer still locked, and all we really need to
2698 * instantiate is the inode core and the forks being modified. We can do this
2699 * manually, then run the inode btree owner change, and then tear down the
2700 * xfs_inode without having to run any transactions at all.
2701 *
2702  * Also, because we don't have a transaction context available here, we pass
2703  * the buffer_list to the owner change operation so that it can gather all
2704  * the buffers we modify for writeback.
2705 */
2706
2707 STATIC int
2708 xfs_recover_inode_owner_change(
2709 struct xfs_mount *mp,
2710 struct xfs_dinode *dip,
2711 struct xfs_inode_log_format *in_f,
2712 struct list_head *buffer_list)
2713 {
2714 struct xfs_inode *ip;
2715 int error;
2716
2717 ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
2718
2719 ip = xfs_inode_alloc(mp, in_f->ilf_ino);
2720 if (!ip)
2721 return ENOMEM;
2722
2723 /* instantiate the inode */
2724 xfs_dinode_from_disk(&ip->i_d, dip);
2725 ASSERT(ip->i_d.di_version >= 3);
2726
2727 error = xfs_iformat_fork(ip, dip);
2728 if (error)
2729 goto out_free_ip;
2730
2731
2732 if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
2733 ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
2734 error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
2735 ip->i_ino, buffer_list);
2736 if (error)
2737 goto out_free_ip;
2738 }
2739
2740 if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
2741 ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
2742 error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
2743 ip->i_ino, buffer_list);
2744 if (error)
2745 goto out_free_ip;
2746 }
2747
2748 out_free_ip:
2749 xfs_inode_free(ip);
2750 return error;
2751 }
2752
2753 STATIC int
2754 xlog_recover_inode_pass2(
2755 struct xlog *log,
2756 struct list_head *buffer_list,
2757 struct xlog_recover_item *item,
2758 xfs_lsn_t current_lsn)
2759 {
2760 xfs_inode_log_format_t *in_f;
2761 xfs_mount_t *mp = log->l_mp;
2762 xfs_buf_t *bp;
2763 xfs_dinode_t *dip;
2764 int len;
2765 xfs_caddr_t src;
2766 xfs_caddr_t dest;
2767 int error;
2768 int attr_index;
2769 uint fields;
2770 xfs_icdinode_t *dicp;
2771 uint isize;
2772 int need_free = 0;
2773
2774 if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
2775 in_f = item->ri_buf[0].i_addr;
2776 } else {
2777 in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
2778 need_free = 1;
2779 error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
2780 if (error)
2781 goto error;
2782 }
2783
2784 /*
2785  * Inode buffers can be freed; look out for that case
2786  * and do not replay the inode.
2787 */
2788 if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
2789 in_f->ilf_len, 0)) {
2790 error = 0;
2791 trace_xfs_log_recover_inode_cancel(log, in_f);
2792 goto error;
2793 }
2794 trace_xfs_log_recover_inode_recover(log, in_f);
2795
2796 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
2797 &xfs_inode_buf_ops);
2798 if (!bp) {
2799 error = ENOMEM;
2800 goto error;
2801 }
2802 error = bp->b_error;
2803 if (error) {
2804 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
2805 goto out_release;
2806 }
2807 ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
2808 dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);
2809
2810 /*
2811 * Make sure the place we're flushing out to really looks
2812 * like an inode!
2813 */
2814 if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
2815 xfs_alert(mp,
2816 "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
2817 __func__, dip, bp, in_f->ilf_ino);
2818 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
2819 XFS_ERRLEVEL_LOW, mp);
2820 error = EFSCORRUPTED;
2821 goto out_release;
2822 }
2823 dicp = item->ri_buf[1].i_addr;
2824 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
2825 xfs_alert(mp,
2826 "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
2827 __func__, item, in_f->ilf_ino);
2828 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
2829 XFS_ERRLEVEL_LOW, mp);
2830 error = EFSCORRUPTED;
2831 goto out_release;
2832 }
2833
2834 /*
2835 * If the inode has an LSN in it, recover the inode only if it's less
2836 * than the lsn of the transaction we are replaying. Note: we still
2837 * need to replay an owner change even though the inode is more recent
2838 * than the transaction as there is no guarantee that all the btree
2839 * blocks are more recent than this transaction, too.
2840 */
2841 if (dip->di_version >= 3) {
2842 xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
2843
2844 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
2845 trace_xfs_log_recover_inode_skip(log, in_f);
2846 error = 0;
2847 goto out_owner_change;
2848 }
2849 }
2850
2851 /*
2852 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
2853 * are transactional and if ordering is necessary we can determine that
2854  * more accurately by the LSN field in the V3 inode core. Don't trust
2855  * the inode versions as we might be changing them here - use the
2856  * superblock flag to determine whether we need to look at di_flushiter
2857  * to skip replay when the on disk inode is newer than the log one.
2858 */
2859 if (!xfs_sb_version_hascrc(&mp->m_sb) &&
2860 dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
2861 /*
2862 * Deal with the wrap case, DI_MAX_FLUSH is less
2863 * than smaller numbers
2864 */
2865 if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
2866 dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
2867 /* do nothing */
2868 } else {
2869 trace_xfs_log_recover_inode_skip(log, in_f);
2870 error = 0;
2871 goto out_release;
2872 }
2873 }
2874
2875 /* Take the opportunity to reset the flush iteration count */
2876 dicp->di_flushiter = 0;
2877
2878 if (unlikely(S_ISREG(dicp->di_mode))) {
2879 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2880 (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
2881 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
2882 XFS_ERRLEVEL_LOW, mp, dicp);
2883 xfs_alert(mp,
2884 "%s: Bad regular inode log record, rec ptr 0x%p, "
2885 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2886 __func__, item, dip, bp, in_f->ilf_ino);
2887 error = EFSCORRUPTED;
2888 goto out_release;
2889 }
2890 } else if (unlikely(S_ISDIR(dicp->di_mode))) {
2891 if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
2892 (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
2893 (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
2894 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
2895 XFS_ERRLEVEL_LOW, mp, dicp);
2896 xfs_alert(mp,
2897 "%s: Bad dir inode log record, rec ptr 0x%p, "
2898 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
2899 __func__, item, dip, bp, in_f->ilf_ino);
2900 error = EFSCORRUPTED;
2901 goto out_release;
2902 }
2903 }
2904 if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
2905 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
2906 XFS_ERRLEVEL_LOW, mp, dicp);
2907 xfs_alert(mp,
2908 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2909 "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
2910 __func__, item, dip, bp, in_f->ilf_ino,
2911 dicp->di_nextents + dicp->di_anextents,
2912 dicp->di_nblocks);
2913 error = EFSCORRUPTED;
2914 goto out_release;
2915 }
2916 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
2917 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
2918 XFS_ERRLEVEL_LOW, mp, dicp);
2919 xfs_alert(mp,
2920 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
2921 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
2922 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
2923 error = EFSCORRUPTED;
2924 goto out_release;
2925 }
2926 isize = xfs_icdinode_size(dicp->di_version);
2927 if (unlikely(item->ri_buf[1].i_len > isize)) {
2928 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
2929 XFS_ERRLEVEL_LOW, mp, dicp);
2930 xfs_alert(mp,
2931 "%s: Bad inode log record length %d, rec ptr 0x%p",
2932 __func__, item->ri_buf[1].i_len, item);
2933 error = EFSCORRUPTED;
2934 goto out_release;
2935 }
2936
2937 /* The core is in in-core format */
2938 xfs_dinode_to_disk(dip, dicp);
2939
2940 /* the rest is in on-disk format */
2941 if (item->ri_buf[1].i_len > isize) {
2942 memcpy((char *)dip + isize,
2943 item->ri_buf[1].i_addr + isize,
2944 item->ri_buf[1].i_len - isize);
2945 }
2946
2947 fields = in_f->ilf_fields;
2948 switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
2949 case XFS_ILOG_DEV:
2950 xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
2951 break;
2952 case XFS_ILOG_UUID:
2953 memcpy(XFS_DFORK_DPTR(dip),
2954 &in_f->ilf_u.ilfu_uuid,
2955 sizeof(uuid_t));
2956 break;
2957 }
2958
2959 if (in_f->ilf_size == 2)
2960 goto out_owner_change;
2961 len = item->ri_buf[2].i_len;
2962 src = item->ri_buf[2].i_addr;
2963 ASSERT(in_f->ilf_size <= 4);
2964 ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
2965 ASSERT(!(fields & XFS_ILOG_DFORK) ||
2966 (len == in_f->ilf_dsize));
2967
2968 switch (fields & XFS_ILOG_DFORK) {
2969 case XFS_ILOG_DDATA:
2970 case XFS_ILOG_DEXT:
2971 memcpy(XFS_DFORK_DPTR(dip), src, len);
2972 break;
2973
2974 case XFS_ILOG_DBROOT:
2975 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
2976 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
2977 XFS_DFORK_DSIZE(dip, mp));
2978 break;
2979
2980 default:
2981 /*
2982 * There are no data fork flags set.
2983 */
2984 ASSERT((fields & XFS_ILOG_DFORK) == 0);
2985 break;
2986 }
2987
2988 /*
2989 * If we logged any attribute data, recover it. There may or
2990 * may not have been any other non-core data logged in this
2991 * transaction.
2992 */
2993 if (in_f->ilf_fields & XFS_ILOG_AFORK) {
2994 if (in_f->ilf_fields & XFS_ILOG_DFORK) {
2995 attr_index = 3;
2996 } else {
2997 attr_index = 2;
2998 }
2999 len = item->ri_buf[attr_index].i_len;
3000 src = item->ri_buf[attr_index].i_addr;
3001 ASSERT(len == in_f->ilf_asize);
3002
3003 switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
3004 case XFS_ILOG_ADATA:
3005 case XFS_ILOG_AEXT:
3006 dest = XFS_DFORK_APTR(dip);
3007 ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
3008 memcpy(dest, src, len);
3009 break;
3010
3011 case XFS_ILOG_ABROOT:
3012 dest = XFS_DFORK_APTR(dip);
3013 xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
3014 len, (xfs_bmdr_block_t*)dest,
3015 XFS_DFORK_ASIZE(dip, mp));
3016 break;
3017
3018 default:
3019 xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
3020 ASSERT(0);
3021 error = EIO;
3022 goto out_release;
3023 }
3024 }
3025
3026 out_owner_change:
3027 if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
3028 error = xfs_recover_inode_owner_change(mp, dip, in_f,
3029 buffer_list);
3030 /* re-generate the checksum. */
3031 xfs_dinode_calc_crc(log->l_mp, dip);
3032
3033 ASSERT(bp->b_target->bt_mount == mp);
3034 bp->b_iodone = xlog_recover_iodone;
3035 xfs_buf_delwri_queue(bp, buffer_list);
3036
3037 out_release:
3038 xfs_buf_relse(bp);
3039 error:
3040 if (need_free)
3041 kmem_free(in_f);
3042 return XFS_ERROR(error);
3043 }
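
/*
 * Illustration only (not part of the original source): the di_flushiter
 * wrap test above, in isolation. On pre-CRC (v1/v2 inode) filesystems a log
 * record is skipped when the on-disk flush counter is ahead of the logged
 * one, except around the DI_MAX_FLUSH wrap, where a large on-disk value and
 * a small logged value mean the counter wrapped and the record is in fact
 * newer.
 */
#include <stdio.h>

#define DI_MAX_FLUSH	0xffff

static int replay_inode(unsigned int logged, unsigned int ondisk)
{
	if (logged < ondisk) {
		/* wrap case: DI_MAX_FLUSH on disk, small value in the log */
		if (ondisk == DI_MAX_FLUSH && logged < (DI_MAX_FLUSH >> 1))
			return 1;
		return 0;	/* on-disk inode is newer: skip */
	}
	return 1;
}

int main(void)
{
	printf("%d\n", replay_inode(10, 5));		/* 1: log is newer */
	printf("%d\n", replay_inode(5, 10));		/* 0: disk is newer */
	printf("%d\n", replay_inode(3, DI_MAX_FLUSH));	/* 1: counter wrapped */
	return 0;
}
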
3044
3045 /*
3046 * Recover QUOTAOFF records. We simply make a note of it in the xlog
3047  * structure so that we know not to do any dquot item or dquot buffer recovery
3048  * of that type.
3049 */
3050 STATIC int
3051 xlog_recover_quotaoff_pass1(
3052 struct xlog *log,
3053 struct xlog_recover_item *item)
3054 {
3055 xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
3056 ASSERT(qoff_f);
3057
3058 /*
3059 * The logitem format's flag tells us if this was user quotaoff,
3060 * group/project quotaoff or both.
3061 */
3062 if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
3063 log->l_quotaoffs_flag |= XFS_DQ_USER;
3064 if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
3065 log->l_quotaoffs_flag |= XFS_DQ_PROJ;
3066 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
3067 log->l_quotaoffs_flag |= XFS_DQ_GROUP;
3068
3069 return (0);
3070 }
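
/*
 * Illustration only (not part of the original source): the quotaoff gating
 * used by the dquot paths above. Pass 1 accumulates the quota types that
 * were turned off into a flag word; pass 2 then drops any dquot record or
 * dquot buffer whose type intersects that mask. Flag values mirror
 * XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP.
 */
#include <stdio.h>

#define DQ_USER		0x1
#define DQ_PROJ		0x2
#define DQ_GROUP	0x4

int main(void)
{
	unsigned int quotaoffs = 0;

	/* pass 1: saw a quotaoff log item for user and project quota */
	quotaoffs |= DQ_USER | DQ_PROJ;

	/* pass 2: replay only types that were not turned off */
	printf("%s\n", (quotaoffs & DQ_USER) ?
	       "skip user dquot" : "replay user dquot");
	printf("%s\n", (quotaoffs & DQ_GROUP) ?
	       "skip group dquot" : "replay group dquot");
	return 0;
}
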
3071
3072 /*
3073 * Recover a dquot record
3074 */
3075 STATIC int
3076 xlog_recover_dquot_pass2(
3077 struct xlog *log,
3078 struct list_head *buffer_list,
3079 struct xlog_recover_item *item,
3080 xfs_lsn_t current_lsn)
3081 {
3082 xfs_mount_t *mp = log->l_mp;
3083 xfs_buf_t *bp;
3084 struct xfs_disk_dquot *ddq, *recddq;
3085 int error;
3086 xfs_dq_logformat_t *dq_f;
3087 uint type;
3088
3089
3090 /*
3091 * Filesystems are required to send in quota flags at mount time.
3092 */
3093 if (mp->m_qflags == 0)
3094 return (0);
3095
3096 recddq = item->ri_buf[1].i_addr;
3097 if (recddq == NULL) {
3098 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
3099 return XFS_ERROR(EIO);
3100 }
3101 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
3102 xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
3103 item->ri_buf[1].i_len, __func__);
3104 return XFS_ERROR(EIO);
3105 }
3106
3107 /*
3108  * This type of quota was turned off, so ignore this record.
3109 */
3110 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3111 ASSERT(type);
3112 if (log->l_quotaoffs_flag & type)
3113 return (0);
3114
3115 /*
3116 * At this point we know that quota was _not_ turned off.
3117 * Since the mount flags are not indicating to us otherwise, this
3118 * must mean that quota is on, and the dquot needs to be replayed.
3119 * Remember that we may not have fully recovered the superblock yet,
3120 * so we can't do the usual trick of looking at the SB quota bits.
3121 *
3122 * The other possibility, of course, is that the quota subsystem was
3123 * removed since the last mount - ENOSYS.
3124 */
3125 dq_f = item->ri_buf[0].i_addr;
3126 ASSERT(dq_f);
3127 error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3128 "xlog_recover_dquot_pass2 (log copy)");
3129 if (error)
3130 return XFS_ERROR(EIO);
3131 ASSERT(dq_f->qlf_len == 1);
3132
3133 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
3134 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
3135 NULL);
3136 if (error)
3137 return error;
3138
3139 ASSERT(bp);
3140 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);
3141
3142 /*
3143 * At least the magic num portion should be on disk because this
3144 * was among a chunk of dquots created earlier, and we did some
3145 * minimal initialization then.
3146 */
3147 error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
3148 "xlog_recover_dquot_pass2");
3149 if (error) {
3150 xfs_buf_relse(bp);
3151 return XFS_ERROR(EIO);
3152 }
3153
3154 /*
3155 * If the dquot has an LSN in it, recover the dquot only if it's less
3156 * than the lsn of the transaction we are replaying.
3157 */
3158 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3159 struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
3160 xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
3161
3162 if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
3163 goto out_release;
3164 }
3165 }
3166
3167 memcpy(ddq, recddq, item->ri_buf[1].i_len);
3168 if (xfs_sb_version_hascrc(&mp->m_sb)) {
3169 xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
3170 XFS_DQUOT_CRC_OFF);
3171 }
3172
3173 ASSERT(dq_f->qlf_size == 2);
3174 ASSERT(bp->b_target->bt_mount == mp);
3175 bp->b_iodone = xlog_recover_iodone;
3176 xfs_buf_delwri_queue(bp, buffer_list);
3177
3178 out_release:
3179 xfs_buf_relse(bp);
3180 return 0;
3181 }
3182
3183 /*
3184 * This routine is called to create an in-core extent free intent
3185 * item from the efi format structure which was logged on disk.
3186 * It allocates an in-core efi, copies the extents from the format
3187 * structure into it, and adds the efi to the AIL with the given
3188 * LSN.
3189 */
3190 STATIC int
3191 xlog_recover_efi_pass2(
3192 struct xlog *log,
3193 struct xlog_recover_item *item,
3194 xfs_lsn_t lsn)
3195 {
3196 int error;
3197 xfs_mount_t *mp = log->l_mp;
3198 xfs_efi_log_item_t *efip;
3199 xfs_efi_log_format_t *efi_formatp;
3200
3201 efi_formatp = item->ri_buf[0].i_addr;
3202
3203 efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
3204 if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
3205 &(efip->efi_format)))) {
3206 xfs_efi_item_free(efip);
3207 return error;
3208 }
3209 atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
3210
3211 spin_lock(&log->l_ailp->xa_lock);
3212 /*
3213 * xfs_trans_ail_update() drops the AIL lock.
3214 */
3215 xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
3216 return 0;
3217 }
3218
3219
3220 /*
3221 * This routine is called when an efd format structure is found in
3222  * a committed transaction in the log. Its purpose is to cancel
3223 * the corresponding efi if it was still in the log. To do this
3224 * it searches the AIL for the efi with an id equal to that in the
3225 * efd format structure. If we find it, we remove the efi from the
3226 * AIL and free it.
3227 */
3228 STATIC int
3229 xlog_recover_efd_pass2(
3230 struct xlog *log,
3231 struct xlog_recover_item *item)
3232 {
3233 xfs_efd_log_format_t *efd_formatp;
3234 xfs_efi_log_item_t *efip = NULL;
3235 xfs_log_item_t *lip;
3236 __uint64_t efi_id;
3237 struct xfs_ail_cursor cur;
3238 struct xfs_ail *ailp = log->l_ailp;
3239
3240 efd_formatp = item->ri_buf[0].i_addr;
3241 ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
3242 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
3243 (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
3244 ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
3245 efi_id = efd_formatp->efd_efi_id;
3246
3247 /*
3248 * Search for the efi with the id in the efd format structure
3249 * in the AIL.
3250 */
3251 spin_lock(&ailp->xa_lock);
3252 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3253 while (lip != NULL) {
3254 if (lip->li_type == XFS_LI_EFI) {
3255 efip = (xfs_efi_log_item_t *)lip;
3256 if (efip->efi_format.efi_id == efi_id) {
3257 /*
3258 * xfs_trans_ail_delete() drops the
3259 * AIL lock.
3260 */
3261 xfs_trans_ail_delete(ailp, lip,
3262 SHUTDOWN_CORRUPT_INCORE);
3263 xfs_efi_item_free(efip);
3264 spin_lock(&ailp->xa_lock);
3265 break;
3266 }
3267 }
3268 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3269 }
3270 xfs_trans_ail_cursor_done(ailp, &cur);
3271 spin_unlock(&ailp->xa_lock);
3272
3273 return 0;
3274 }
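
/*
 * Illustration only (not part of the original source): a user-space model
 * of the EFI/EFD pairing above. Recovering an EFI inserts an intent keyed
 * by its id into a list standing in for the AIL; a later EFD with the same
 * id cancels it. Whatever intents remain after the log has been walked are
 * the extent-free operations that must still be performed.
 */
#include <stdio.h>
#include <stdlib.h>

struct intent {
	unsigned long long	id;
	struct intent		*next;
};

static struct intent *ail;

static void recover_efi(unsigned long long id)
{
	struct intent *i = malloc(sizeof(*i));

	i->id = id;
	i->next = ail;
	ail = i;
}

static void recover_efd(unsigned long long id)
{
	struct intent *i, **pp;

	for (pp = &ail; (i = *pp) != NULL; pp = &i->next) {
		if (i->id == id) {
			*pp = i->next;	/* intent completed: drop it */
			free(i);
			return;
		}
	}
}

int main(void)
{
	struct intent *i;

	recover_efi(1);		/* extent frees started... */
	recover_efi(2);
	recover_efd(1);		/* ...and id 1 completed before the crash */

	for (i = ail; i; i = i->next)
		printf("EFI %llu still pending, must be replayed\n", i->id);
	return 0;
}
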
3275
3276 /*
3277 * This routine is called when an inode create format structure is found in a
3278  * committed transaction in the log. Its purpose is to initialise the inodes
3279  * being allocated on disk. This requires us to get inode cluster buffers that
3280  * match the range to be initialised, stamped with inode templates and written
3281 * by delayed write so that subsequent modifications will hit the cached buffer
3282 * and only need writing out at the end of recovery.
3283 */
3284 STATIC int
3285 xlog_recover_do_icreate_pass2(
3286 struct xlog *log,
3287 struct list_head *buffer_list,
3288 xlog_recover_item_t *item)
3289 {
3290 struct xfs_mount *mp = log->l_mp;
3291 struct xfs_icreate_log *icl;
3292 xfs_agnumber_t agno;
3293 xfs_agblock_t agbno;
3294 unsigned int count;
3295 unsigned int isize;
3296 xfs_agblock_t length;
3297
3298 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
3299 if (icl->icl_type != XFS_LI_ICREATE) {
3300 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
3301 return EINVAL;
3302 }
3303
3304 if (icl->icl_size != 1) {
3305 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
3306 return EINVAL;
3307 }
3308
3309 agno = be32_to_cpu(icl->icl_ag);
3310 if (agno >= mp->m_sb.sb_agcount) {
3311 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
3312 return EINVAL;
3313 }
3314 agbno = be32_to_cpu(icl->icl_agbno);
3315 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
3316 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
3317 return EINVAL;
3318 }
3319 isize = be32_to_cpu(icl->icl_isize);
3320 if (isize != mp->m_sb.sb_inodesize) {
3321 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
3322 return EINVAL;
3323 }
3324 count = be32_to_cpu(icl->icl_count);
3325 if (!count) {
3326 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
3327 return EINVAL;
3328 }
3329 length = be32_to_cpu(icl->icl_length);
3330 if (!length || length >= mp->m_sb.sb_agblocks) {
3331 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
3332 return EINVAL;
3333 }
3334
3335 /* existing allocation is fixed value */
3336 ASSERT(count == XFS_IALLOC_INODES(mp));
3337 ASSERT(length == XFS_IALLOC_BLOCKS(mp));
3338 if (count != XFS_IALLOC_INODES(mp) ||
3339 length != XFS_IALLOC_BLOCKS(mp)) {
3340 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
3341 return EINVAL;
3342 }
3343
3344 /*
3345 * Inode buffers can be freed. Do not replay the inode initialisation as
3346 * we could be overwriting something written after this inode buffer was
3347 * cancelled.
3348 *
3349 * XXX: we need to iterate all buffers and only init those that are not
3350 * cancelled. I think that a more fine grained factoring of
3351 * xfs_ialloc_inode_init may be appropriate here to enable this to be
3352 * done easily.
3353 */
3354 if (xlog_check_buffer_cancelled(log,
3355 XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
3356 return 0;
3357
3358 xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
3359 be32_to_cpu(icl->icl_gen));
3360 return 0;
3361 }
3362
3363 /*
3364 * Free up any resources allocated by the transaction
3365 *
3366 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
3367 */
3368 STATIC void
3369 xlog_recover_free_trans(
3370 struct xlog_recover *trans)
3371 {
3372 xlog_recover_item_t *item, *n;
3373 int i;
3374
3375 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
3376 /* Free the regions in the item. */
3377 list_del(&item->ri_list);
3378 for (i = 0; i < item->ri_cnt; i++)
3379 kmem_free(item->ri_buf[i].i_addr);
3380 /* Free the item itself */
3381 kmem_free(item->ri_buf);
3382 kmem_free(item);
3383 }
3384 /* Free the transaction recover structure */
3385 kmem_free(trans);
3386 }
3387
3388 STATIC void
3389 xlog_recover_buffer_ra_pass2(
3390 struct xlog *log,
3391 struct xlog_recover_item *item)
3392 {
3393 struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
3394 struct xfs_mount *mp = log->l_mp;
3395
3396 if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
3397 buf_f->blf_len, buf_f->blf_flags)) {
3398 return;
3399 }
3400
3401 xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
3402 buf_f->blf_len, NULL);
3403 }
3404
3405 STATIC void
3406 xlog_recover_inode_ra_pass2(
3407 struct xlog *log,
3408 struct xlog_recover_item *item)
3409 {
3410 struct xfs_inode_log_format ilf_buf;
3411 struct xfs_inode_log_format *ilfp;
3412 struct xfs_mount *mp = log->l_mp;
3413 int error;
3414
3415 if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
3416 ilfp = item->ri_buf[0].i_addr;
3417 } else {
3418 ilfp = &ilf_buf;
3419 memset(ilfp, 0, sizeof(*ilfp));
3420 error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
3421 if (error)
3422 return;
3423 }
3424
3425 if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
3426 return;
3427
3428 xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
3429 ilfp->ilf_len, &xfs_inode_buf_ra_ops);
3430 }
3431
3432 STATIC void
3433 xlog_recover_dquot_ra_pass2(
3434 struct xlog *log,
3435 struct xlog_recover_item *item)
3436 {
3437 struct xfs_mount *mp = log->l_mp;
3438 struct xfs_disk_dquot *recddq;
3439 struct xfs_dq_logformat *dq_f;
3440 uint type;
3441
3442
3443 if (mp->m_qflags == 0)
3444 return;
3445
3446 recddq = item->ri_buf[1].i_addr;
3447 if (recddq == NULL)
3448 return;
3449 if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
3450 return;
3451
3452 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
3453 ASSERT(type);
3454 if (log->l_quotaoffs_flag & type)
3455 return;
3456
3457 dq_f = item->ri_buf[0].i_addr;
3458 ASSERT(dq_f);
3459 ASSERT(dq_f->qlf_len == 1);
3460
3461 xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
3462 XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
3463 }
3464
3465 STATIC void
3466 xlog_recover_ra_pass2(
3467 struct xlog *log,
3468 struct xlog_recover_item *item)
3469 {
3470 switch (ITEM_TYPE(item)) {
3471 case XFS_LI_BUF:
3472 xlog_recover_buffer_ra_pass2(log, item);
3473 break;
3474 case XFS_LI_INODE:
3475 xlog_recover_inode_ra_pass2(log, item);
3476 break;
3477 case XFS_LI_DQUOT:
3478 xlog_recover_dquot_ra_pass2(log, item);
3479 break;
3480 case XFS_LI_EFI:
3481 case XFS_LI_EFD:
3482 case XFS_LI_QUOTAOFF:
3483 default:
3484 break;
3485 }
3486 }
3487
3488 STATIC int
3489 xlog_recover_commit_pass1(
3490 struct xlog *log,
3491 struct xlog_recover *trans,
3492 struct xlog_recover_item *item)
3493 {
3494 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
3495
3496 switch (ITEM_TYPE(item)) {
3497 case XFS_LI_BUF:
3498 return xlog_recover_buffer_pass1(log, item);
3499 case XFS_LI_QUOTAOFF:
3500 return xlog_recover_quotaoff_pass1(log, item);
3501 case XFS_LI_INODE:
3502 case XFS_LI_EFI:
3503 case XFS_LI_EFD:
3504 case XFS_LI_DQUOT:
3505 case XFS_LI_ICREATE:
3506 /* nothing to do in pass 1 */
3507 return 0;
3508 default:
3509 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3510 __func__, ITEM_TYPE(item));
3511 ASSERT(0);
3512 return XFS_ERROR(EIO);
3513 }
3514 }
3515
3516 STATIC int
3517 xlog_recover_commit_pass2(
3518 struct xlog *log,
3519 struct xlog_recover *trans,
3520 struct list_head *buffer_list,
3521 struct xlog_recover_item *item)
3522 {
3523 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
3524
3525 switch (ITEM_TYPE(item)) {
3526 case XFS_LI_BUF:
3527 return xlog_recover_buffer_pass2(log, buffer_list, item,
3528 trans->r_lsn);
3529 case XFS_LI_INODE:
3530 return xlog_recover_inode_pass2(log, buffer_list, item,
3531 trans->r_lsn);
3532 case XFS_LI_EFI:
3533 return xlog_recover_efi_pass2(log, item, trans->r_lsn);
3534 case XFS_LI_EFD:
3535 return xlog_recover_efd_pass2(log, item);
3536 case XFS_LI_DQUOT:
3537 return xlog_recover_dquot_pass2(log, buffer_list, item,
3538 trans->r_lsn);
3539 case XFS_LI_ICREATE:
3540 return xlog_recover_do_icreate_pass2(log, buffer_list, item);
3541 case XFS_LI_QUOTAOFF:
3542 /* nothing to do in pass2 */
3543 return 0;
3544 default:
3545 xfs_warn(log->l_mp, "%s: invalid item type (%d)",
3546 __func__, ITEM_TYPE(item));
3547 ASSERT(0);
3548 return XFS_ERROR(EIO);
3549 }
3550 }
3551
3552 STATIC int
3553 xlog_recover_items_pass2(
3554 struct xlog *log,
3555 struct xlog_recover *trans,
3556 struct list_head *buffer_list,
3557 struct list_head *item_list)
3558 {
3559 struct xlog_recover_item *item;
3560 int error = 0;
3561
3562 list_for_each_entry(item, item_list, ri_list) {
3563 error = xlog_recover_commit_pass2(log, trans,
3564 buffer_list, item);
3565 if (error)
3566 return error;
3567 }
3568
3569 return error;
3570 }
3571
3572 /*
3573 * Perform the transaction.
3574 *
3575 * If the transaction modifies a buffer or inode, do it now. Otherwise,
3576 * EFIs and EFDs get queued up by adding entries into the AIL for them.
3577 */
3578 STATIC int
3579 xlog_recover_commit_trans(
3580 struct xlog *log,
3581 struct xlog_recover *trans,
3582 int pass)
3583 {
3584 int error = 0;
3585 int error2;
3586 int items_queued = 0;
3587 struct xlog_recover_item *item;
3588 struct xlog_recover_item *next;
3589 LIST_HEAD (buffer_list);
3590 LIST_HEAD (ra_list);
3591 LIST_HEAD (done_list);
3592
3593 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
3594
3595 hlist_del(&trans->r_list);
3596
3597 error = xlog_recover_reorder_trans(log, trans, pass);
3598 if (error)
3599 return error;
3600
3601 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
3602 switch (pass) {
3603 case XLOG_RECOVER_PASS1:
3604 error = xlog_recover_commit_pass1(log, trans, item);
3605 break;
3606 case XLOG_RECOVER_PASS2:
3607 xlog_recover_ra_pass2(log, item);
3608 list_move_tail(&item->ri_list, &ra_list);
3609 items_queued++;
3610 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
3611 error = xlog_recover_items_pass2(log, trans,
3612 &buffer_list, &ra_list);
3613 list_splice_tail_init(&ra_list, &done_list);
3614 items_queued = 0;
3615 }
3616
3617 break;
3618 default:
3619 ASSERT(0);
3620 }
3621
3622 if (error)
3623 goto out;
3624 }
3625
3626 out:
3627 if (!list_empty(&ra_list)) {
3628 if (!error)
3629 error = xlog_recover_items_pass2(log, trans,
3630 &buffer_list, &ra_list);
3631 list_splice_tail_init(&ra_list, &done_list);
3632 }
3633
3634 if (!list_empty(&done_list))
3635 list_splice_init(&done_list, &trans->r_itemq);
3636
3637 xlog_recover_free_trans(trans);
3638
3639 error2 = xfs_buf_delwri_submit(&buffer_list);
3640 return error ? error : error2;
3641 }
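
/*
 * Editorial aside: a minimal userspace sketch of the batch-then-flush
 * pattern xlog_recover_commit_trans() uses above. Items are queued until
 * XLOG_RECOVER_COMMIT_QUEUE_MAX is reached, then flushed; any partial
 * batch left at loop exit is flushed on the way out, mirroring the out:
 * label. All names below are hypothetical stand-ins, not kernel APIs.
 */
#include <stdio.h>

#define QUEUE_MAX	100

static int flush_batch(const int *batch, int count)
{
	(void)batch;		/* a real implementation replays these */
	printf("replaying %d queued items\n", count);
	return 0;		/* stand-in for xlog_recover_items_pass2() */
}

int main(void)
{
	int batch[QUEUE_MAX];
	int queued = 0;
	int error = 0;
	int i;

	for (i = 0; i < 250; i++) {
		batch[queued++] = i;	/* list_move_tail() analogue */
		if (queued >= QUEUE_MAX) {
			error = flush_batch(batch, queued);
			queued = 0;
			if (error)
				break;
		}
	}
	/* flush whatever is still queued, as the out: path does above */
	if (!error && queued)
		error = flush_batch(batch, queued);
	return error;
}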
3642
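/*
 * An unmount record indicates a clean log tail; there is nothing to
 * replay for it, so just note that we saw one.
 */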
3643 STATIC int
3644 xlog_recover_unmount_trans(
3645 struct xlog *log,
3646 struct xlog_recover *trans)
3647 {
3648 /* Do nothing now */
3649 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
3650 return 0;
3651 }
3652
3653 /*
3654 * There are two valid states of the r_state field. 0 indicates that the
3655 * transaction structure is in a normal state. We have either seen the
3656 * start of the transaction or the last operation we added was not a partial
3657 * operation. If the last operation we added to the transaction was a
3658 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
3659 *
3660 * NOTE: skip LRs with 0 data length.
3661 */
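/*
 * Dispatch summary for the op header flags handled below:
 *
 *	XLOG_COMMIT_TRANS	commit and replay the accumulated items
 *	XLOG_UNMOUNT_TRANS	unmount record; nothing to replay
 *	XLOG_WAS_CONT_TRANS	continuation of a partial operation
 *	XLOG_START_TRANS	invalid once the tid is already known
 *	0, XLOG_CONTINUE_TRANS	add a new region to the transaction
 */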
3662 STATIC int
3663 xlog_recover_process_data(
3664 struct xlog *log,
3665 struct hlist_head rhash[],
3666 struct xlog_rec_header *rhead,
3667 xfs_caddr_t dp,
3668 int pass)
3669 {
3670 xfs_caddr_t lp;
3671 int num_logops;
3672 xlog_op_header_t *ohead;
3673 xlog_recover_t *trans;
3674 xlog_tid_t tid;
3675 int error;
3676 unsigned long hash;
3677 uint flags;
3678
3679 lp = dp + be32_to_cpu(rhead->h_len);
3680 num_logops = be32_to_cpu(rhead->h_num_logops);
3681
3682 /* check the log format matches our own - else we can't recover */
3683 if (xlog_header_check_recover(log->l_mp, rhead))
3684 return (XFS_ERROR(EIO));
3685
3686 while ((dp < lp) && num_logops) {
3687 ASSERT(dp + sizeof(xlog_op_header_t) <= lp);
3688 ohead = (xlog_op_header_t *)dp;
3689 dp += sizeof(xlog_op_header_t);
3690 if (ohead->oh_clientid != XFS_TRANSACTION &&
3691 ohead->oh_clientid != XFS_LOG) {
3692 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
3693 __func__, ohead->oh_clientid);
3694 ASSERT(0);
3695 return (XFS_ERROR(EIO));
3696 }
3697 tid = be32_to_cpu(ohead->oh_tid);
3698 hash = XLOG_RHASH(tid);
3699 trans = xlog_recover_find_tid(&rhash[hash], tid);
3700 if (trans == NULL) { /* not found; add new tid */
3701 if (ohead->oh_flags & XLOG_START_TRANS)
3702 xlog_recover_new_tid(&rhash[hash], tid,
3703 be64_to_cpu(rhead->h_lsn));
3704 } else {
3705 if (dp + be32_to_cpu(ohead->oh_len) > lp) {
3706 xfs_warn(log->l_mp, "%s: bad length 0x%x",
3707 __func__, be32_to_cpu(ohead->oh_len));
3708 WARN_ON(1);
3709 return (XFS_ERROR(EIO));
3710 }
3711 flags = ohead->oh_flags & ~XLOG_END_TRANS;
3712 if (flags & XLOG_WAS_CONT_TRANS)
3713 flags &= ~XLOG_CONTINUE_TRANS;
3714 switch (flags) {
3715 case XLOG_COMMIT_TRANS:
3716 error = xlog_recover_commit_trans(log,
3717 trans, pass);
3718 break;
3719 case XLOG_UNMOUNT_TRANS:
3720 error = xlog_recover_unmount_trans(log, trans);
3721 break;
3722 case XLOG_WAS_CONT_TRANS:
3723 error = xlog_recover_add_to_cont_trans(log,
3724 trans, dp,
3725 be32_to_cpu(ohead->oh_len));
3726 break;
3727 case XLOG_START_TRANS:
3728 xfs_warn(log->l_mp, "%s: bad transaction",
3729 __func__);
3730 ASSERT(0);
3731 error = XFS_ERROR(EIO);
3732 break;
3733 case 0:
3734 case XLOG_CONTINUE_TRANS:
3735 error = xlog_recover_add_to_trans(log, trans,
3736 dp, be32_to_cpu(ohead->oh_len));
3737 break;
3738 default:
3739 xfs_warn(log->l_mp, "%s: bad flag 0x%x",
3740 __func__, flags);
3741 ASSERT(0);
3742 error = XFS_ERROR(EIO);
3743 break;
3744 }
3745 if (error)
3746 return error;
3747 }
3748 dp += be32_to_cpu(ohead->oh_len);
3749 num_logops--;
3750 }
3751 return 0;
3752 }
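
/*
 * Editorial aside: the record body walked by xlog_recover_process_data()
 * is a packed sequence of (op header, payload) pairs. This sketch shows
 * just the cursor arithmetic with a hypothetical header type; num_logops
 * and the oh_len bounds check keep a corrupt record from walking past
 * the end of the buffer.
 */
#include <stdint.h>

struct sketch_op_header {
	uint32_t oh_len;	/* payload bytes following this header */
	/* tid, flags and clientid elided for brevity */
};

/* visit each op payload in [dp, lp); returns ops seen, or -1 on error */
static int walk_ops(const char *dp, const char *lp, int num_logops)
{
	int seen = 0;

	while (dp < lp && num_logops--) {
		const struct sketch_op_header *ohead = (const void *)dp;

		dp += sizeof(*ohead);
		if (dp + ohead->oh_len > lp)
			return -1;	/* bad length: corrupt record */
		/* ...dispatch on the op's flags here... */
		dp += ohead->oh_len;
		seen++;
	}
	return seen;
}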
3753
3754 /*
3755 * Process an extent free intent item that was recovered from
3756 * the log. We need to free the extents that it describes.
3757 */
3758 STATIC int
3759 xlog_recover_process_efi(
3760 xfs_mount_t *mp,
3761 xfs_efi_log_item_t *efip)
3762 {
3763 xfs_efd_log_item_t *efdp;
3764 xfs_trans_t *tp;
3765 int i;
3766 int error = 0;
3767 xfs_extent_t *extp;
3768 xfs_fsblock_t startblock_fsb;
3769
3770 ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
3771
3772 /*
3773 * First check the validity of the extents described by the
3774 * EFI. If any are bad, then assume that all are bad and
3775 * just toss the EFI.
3776 */
3777 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3778 extp = &(efip->efi_format.efi_extents[i]);
3779 startblock_fsb = XFS_BB_TO_FSB(mp,
3780 XFS_FSB_TO_DADDR(mp, extp->ext_start));
3781 if ((startblock_fsb == 0) ||
3782 (extp->ext_len == 0) ||
3783 (startblock_fsb >= mp->m_sb.sb_dblocks) ||
3784 (extp->ext_len >= mp->m_sb.sb_agblocks)) {
3785 /*
3786 * This will pull the EFI from the AIL and
3787 * free the memory associated with it.
3788 */
3789 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3790 xfs_efi_release(efip, efip->efi_format.efi_nextents);
3791 return XFS_ERROR(EIO);
3792 }
3793 }
3794
3795 tp = xfs_trans_alloc(mp, 0);
3796 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
3797 if (error)
3798 goto abort_error;
3799 efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
3800
3801 for (i = 0; i < efip->efi_format.efi_nextents; i++) {
3802 extp = &(efip->efi_format.efi_extents[i]);
3803 error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
3804 if (error)
3805 goto abort_error;
3806 xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
3807 extp->ext_len);
3808 }
3809
3810 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
3811 error = xfs_trans_commit(tp, 0);
3812 return error;
3813
3814 abort_error:
3815 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3816 return error;
3817 }
3818
3819 /*
3820 * When this is called, all of the EFIs which did not have
3821 * corresponding EFDs should be in the AIL. What we do now
3822 * is free the extents associated with each one.
3823 *
3824 * Since we process the EFIs in normal transactions, they
3825 * will be removed at some point after the commit. This prevents
3826 * us from just walking down the list processing each one.
3827 * We'll use a flag in the EFI to skip those that we've already
3828 * processed and use the AIL iteration mechanism's generation
3829 * count to try to speed this up at least a bit.
3830 *
3831 * When we start, we know that the EFIs are the only things in
3832 * the AIL. As we process them, however, other items are added
3833 * to the AIL. Since everything added to the AIL must come after
3834 * everything already in the AIL, we stop processing as soon as
3835 * we see something other than an EFI in the AIL.
3836 */
3837 STATIC int
3838 xlog_recover_process_efis(
3839 struct xlog *log)
3840 {
3841 xfs_log_item_t *lip;
3842 xfs_efi_log_item_t *efip;
3843 int error = 0;
3844 struct xfs_ail_cursor cur;
3845 struct xfs_ail *ailp;
3846
3847 ailp = log->l_ailp;
3848 spin_lock(&ailp->xa_lock);
3849 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
3850 while (lip != NULL) {
3851 /*
3852 * We're done when we see something other than an EFI.
3853 * There should be no EFIs left in the AIL now.
3854 */
3855 if (lip->li_type != XFS_LI_EFI) {
3856 #ifdef DEBUG
3857 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
3858 ASSERT(lip->li_type != XFS_LI_EFI);
3859 #endif
3860 break;
3861 }
3862
3863 /*
3864 * Skip EFIs that we've already processed.
3865 */
3866 efip = (xfs_efi_log_item_t *)lip;
3867 if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
3868 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3869 continue;
3870 }
3871
3872 spin_unlock(&ailp->xa_lock);
3873 error = xlog_recover_process_efi(log->l_mp, efip);
3874 spin_lock(&ailp->xa_lock);
3875 if (error)
3876 goto out;
3877 lip = xfs_trans_ail_cursor_next(ailp, &cur);
3878 }
3879 out:
3880 xfs_trans_ail_cursor_done(ailp, &cur);
3881 spin_unlock(&ailp->xa_lock);
3882 return error;
3883 }
3884
3885 /*
3886 * This routine performs a transaction to null out a bad inode pointer
3887 * in an agi unlinked inode hash bucket.
3888 */
3889 STATIC void
3890 xlog_recover_clear_agi_bucket(
3891 xfs_mount_t *mp,
3892 xfs_agnumber_t agno,
3893 int bucket)
3894 {
3895 xfs_trans_t *tp;
3896 xfs_agi_t *agi;
3897 xfs_buf_t *agibp;
3898 int offset;
3899 int error;
3900
3901 tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
3902 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
3903 if (error)
3904 goto out_abort;
3905
3906 error = xfs_read_agi(mp, tp, agno, &agibp);
3907 if (error)
3908 goto out_abort;
3909
3910 agi = XFS_BUF_TO_AGI(agibp);
3911 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
3912 offset = offsetof(xfs_agi_t, agi_unlinked) +
3913 (sizeof(xfs_agino_t) * bucket);
3914 xfs_trans_log_buf(tp, agibp, offset,
3915 (offset + sizeof(xfs_agino_t) - 1));
3916
3917 error = xfs_trans_commit(tp, 0);
3918 if (error)
3919 goto out_error;
3920 return;
3921
3922 out_abort:
3923 xfs_trans_cancel(tp, XFS_TRANS_ABORT);
3924 out_error:
3925 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
3926 return;
3927 }
3928
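/*
 * Pull one inode off an AGI unlinked bucket: look the inode up, read the
 * on-disk copy to find the next entry in the chain, then drop our
 * reference so inactivation can free it (di_nlink is zero). Returns the
 * next agino in the bucket, or NULLAGINO if the bucket must be abandoned.
 */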
3929 STATIC xfs_agino_t
3930 xlog_recover_process_one_iunlink(
3931 struct xfs_mount *mp,
3932 xfs_agnumber_t agno,
3933 xfs_agino_t agino,
3934 int bucket)
3935 {
3936 struct xfs_buf *ibp;
3937 struct xfs_dinode *dip;
3938 struct xfs_inode *ip;
3939 xfs_ino_t ino;
3940 int error;
3941
3942 ino = XFS_AGINO_TO_INO(mp, agno, agino);
3943 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
3944 if (error)
3945 goto fail;
3946
3947 /*
3948 * Get the on disk inode to find the next inode in the bucket.
3949 */
3950 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
3951 if (error)
3952 goto fail_iput;
3953
3954 ASSERT(ip->i_d.di_nlink == 0);
3955 ASSERT(ip->i_d.di_mode != 0);
3956
3957 /* setup for the next pass */
3958 agino = be32_to_cpu(dip->di_next_unlinked);
3959 xfs_buf_relse(ibp);
3960
3961 /*
3962 * Prevent any DMAPI event from being sent when the reference on
3963 * the inode is dropped.
3964 */
3965 ip->i_d.di_dmevmask = 0;
3966
3967 IRELE(ip);
3968 return agino;
3969
3970 fail_iput:
3971 IRELE(ip);
3972 fail:
3973 /*
3974 * We can't read in the inode this bucket points to, or this inode
3975 * is messed up. Just ditch this bucket of inodes. We will lose
3976 * some inodes and space, but at least we won't hang.
3977 *
3978 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
3979 * clear the inode pointer in the bucket.
3980 */
3981 xlog_recover_clear_agi_bucket(mp, agno, bucket);
3982 return NULLAGINO;
3983 }
3984
3985 /*
3986 * xlog_iunlink_recover
3987 *
3988  * This is called during recovery to process any inodes which
3989  * were unlinked but not freed when the system crashed. These
3990 * inodes will be on the lists in the AGI blocks. What we do
3991 * here is scan all the AGIs and fully truncate and free any
3992 * inodes found on the lists. Each inode is removed from the
3993 * lists when it has been fully truncated and is freed. The
3994 * freeing of the inode and its removal from the list must be
3995 * atomic.
3996 */
3997 STATIC void
3998 xlog_recover_process_iunlinks(
3999 struct xlog *log)
4000 {
4001 xfs_mount_t *mp;
4002 xfs_agnumber_t agno;
4003 xfs_agi_t *agi;
4004 xfs_buf_t *agibp;
4005 xfs_agino_t agino;
4006 int bucket;
4007 int error;
4008 uint mp_dmevmask;
4009
4010 mp = log->l_mp;
4011
4012 /*
4013 * Prevent any DMAPI event from being sent while in this function.
4014 */
4015 mp_dmevmask = mp->m_dmevmask;
4016 mp->m_dmevmask = 0;
4017
4018 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4019 /*
4020 * Find the agi for this ag.
4021 */
4022 error = xfs_read_agi(mp, NULL, agno, &agibp);
4023 if (error) {
4024 /*
4025 * AGI is b0rked. Don't process it.
4026 *
4027 * We should probably mark the filesystem as corrupt
4028 * after we've recovered all the ag's we can....
4029 */
4030 continue;
4031 }
4032 /*
4033 * Unlock the buffer so that it can be acquired in the normal
4034 * course of the transaction to truncate and free each inode.
4035 * Because we are not racing with anyone else here for the AGI
4036 * buffer, we don't even need to hold it locked to read the
4037 * initial unlinked bucket entries out of the buffer. We keep
4038 * buffer reference though, so that it stays pinned in memory
4039 * while we need the buffer.
4040 */
4041 agi = XFS_BUF_TO_AGI(agibp);
4042 xfs_buf_unlock(agibp);
4043
4044 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
4045 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
4046 while (agino != NULLAGINO) {
4047 agino = xlog_recover_process_one_iunlink(mp,
4048 agno, agino, bucket);
4049 }
4050 }
4051 xfs_buf_rele(agibp);
4052 }
4053
4054 mp->m_dmevmask = mp_dmevmask;
4055 }
4056
4057 /*
4058  * Unpack the log buffer data and CRC check it. If the check fails, issue a
4059  * warning only if the CRC in the header is non-zero. This makes the
4060  * check an advisory warning, and the zero-CRC check prevents failure
4061  * warnings from being emitted when upgrading from a kernel that does not
4062  * add CRCs by default.
4063  *
4064  * When the filesystem is CRC enabled, a CRC mismatch becomes a fatal log
4065  * corruption failure.
4066 */
4067 STATIC int
4068 xlog_unpack_data_crc(
4069 struct xlog_rec_header *rhead,
4070 xfs_caddr_t dp,
4071 struct xlog *log)
4072 {
4073 __le32 crc;
4074
4075 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
4076 if (crc != rhead->h_crc) {
4077 if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
4078 xfs_alert(log->l_mp,
4079 "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
4080 le32_to_cpu(rhead->h_crc),
4081 le32_to_cpu(crc));
4082 xfs_hex_dump(dp, 32);
4083 }
4084
4085 /*
4086 * If we've detected a log record corruption, then we can't
4087 * recover past this point. Abort recovery if we are enforcing
4088 * CRC protection by punting an error back up the stack.
4089 */
4090 if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
4091 return EFSCORRUPTED;
4092 }
4093
4094 return 0;
4095 }
4096
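/*
 * Unpack a log record: CRC check the payload, then restore the cycle
 * numbers that were stashed in the record header (and, for v2 logs, in
 * the extended headers) back into the first word of each basic block.
 */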
4097 STATIC int
4098 xlog_unpack_data(
4099 struct xlog_rec_header *rhead,
4100 xfs_caddr_t dp,
4101 struct xlog *log)
4102 {
4103 int i, j, k;
4104 int error;
4105
4106 error = xlog_unpack_data_crc(rhead, dp, log);
4107 if (error)
4108 return error;
4109
4110 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
4111 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
4112 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
4113 dp += BBSIZE;
4114 }
4115
4116 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4117 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
4118 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
4119 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4120 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
4121 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
4122 dp += BBSIZE;
4123 }
4124 }
4125
4126 return 0;
4127 }
4128
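/*
 * Sanity check a log record header before trusting it: magic number,
 * version bits, a sane body length and an in-range block number.
 */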
4129 STATIC int
4130 xlog_valid_rec_header(
4131 struct xlog *log,
4132 struct xlog_rec_header *rhead,
4133 xfs_daddr_t blkno)
4134 {
4135 int hlen;
4136
4137 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
4138 XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
4139 XFS_ERRLEVEL_LOW, log->l_mp);
4140 return XFS_ERROR(EFSCORRUPTED);
4141 }
4142 if (unlikely(
4143 (!rhead->h_version ||
4144 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
4145 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
4146 __func__, be32_to_cpu(rhead->h_version));
4147 return XFS_ERROR(EIO);
4148 }
4149
4150 /* LR body must have data or it wouldn't have been written */
4151 hlen = be32_to_cpu(rhead->h_len);
4152 if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
4153 XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
4154 XFS_ERRLEVEL_LOW, log->l_mp);
4155 return XFS_ERROR(EFSCORRUPTED);
4156 }
4157 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
4158 XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
4159 XFS_ERRLEVEL_LOW, log->l_mp);
4160 return XFS_ERROR(EFSCORRUPTED);
4161 }
4162 return 0;
4163 }
4164
4165 /*
4166 * Read the log from tail to head and process the log records found.
4167 * Handle the two cases where the tail and head are in the same cycle
4168 * and where the active portion of the log wraps around the end of
4169 * the physical log separately. The pass parameter is passed through
4170 * to the routines called to process the data and is not looked at
4171 * here.
4172 */
4173 STATIC int
4174 xlog_do_recovery_pass(
4175 struct xlog *log,
4176 xfs_daddr_t head_blk,
4177 xfs_daddr_t tail_blk,
4178 int pass)
4179 {
4180 xlog_rec_header_t *rhead;
4181 xfs_daddr_t blk_no;
4182 xfs_caddr_t offset;
4183 xfs_buf_t *hbp, *dbp;
4184 int error = 0, h_size;
4185 int bblks, split_bblks;
4186 int hblks, split_hblks, wrapped_hblks;
4187 struct hlist_head rhash[XLOG_RHASH_SIZE];
4188
4189 ASSERT(head_blk != tail_blk);
4190
4191 /*
4192 * Read the header of the tail block and get the iclog buffer size from
4193 * h_size. Use this to tell how many sectors make up the log header.
4194 */
4195 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
4196 /*
4197 * When using variable length iclogs, read first sector of
4198 * iclog header and extract the header size from it. Get a
4199 * new hbp that is the correct size.
4200 */
4201 hbp = xlog_get_bp(log, 1);
4202 if (!hbp)
4203 return ENOMEM;
4204
4205 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
4206 if (error)
4207 goto bread_err1;
4208
4209 rhead = (xlog_rec_header_t *)offset;
4210 error = xlog_valid_rec_header(log, rhead, tail_blk);
4211 if (error)
4212 goto bread_err1;
4213 h_size = be32_to_cpu(rhead->h_size);
4214 if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
4215 (h_size > XLOG_HEADER_CYCLE_SIZE)) {
4216 hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
4217 if (h_size % XLOG_HEADER_CYCLE_SIZE)
4218 hblks++;
4219 xlog_put_bp(hbp);
4220 hbp = xlog_get_bp(log, hblks);
4221 } else {
4222 hblks = 1;
4223 }
4224 } else {
4225 ASSERT(log->l_sectBBsize == 1);
4226 hblks = 1;
4227 hbp = xlog_get_bp(log, 1);
4228 h_size = XLOG_BIG_RECORD_BSIZE;
4229 }
4230
4231 if (!hbp)
4232 return ENOMEM;
4233 dbp = xlog_get_bp(log, BTOBB(h_size));
4234 if (!dbp) {
4235 xlog_put_bp(hbp);
4236 return ENOMEM;
4237 }
4238
4239 memset(rhash, 0, sizeof(rhash));
4240 if (tail_blk <= head_blk) {
4241 for (blk_no = tail_blk; blk_no < head_blk; ) {
4242 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4243 if (error)
4244 goto bread_err2;
4245
4246 rhead = (xlog_rec_header_t *)offset;
4247 error = xlog_valid_rec_header(log, rhead, blk_no);
4248 if (error)
4249 goto bread_err2;
4250
4251 /* blocks in data section */
4252 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4253 error = xlog_bread(log, blk_no + hblks, bblks, dbp,
4254 &offset);
4255 if (error)
4256 goto bread_err2;
4257
4258 error = xlog_unpack_data(rhead, offset, log);
4259 if (error)
4260 goto bread_err2;
4261
4262 error = xlog_recover_process_data(log,
4263 rhash, rhead, offset, pass);
4264 if (error)
4265 goto bread_err2;
4266 blk_no += bblks + hblks;
4267 }
4268 } else {
4269 /*
4270 * Perform recovery around the end of the physical log.
4271 * When the head is not on the same cycle number as the tail,
4272 * we can't do a sequential recovery as above.
4273 */
4274 blk_no = tail_blk;
4275 while (blk_no < log->l_logBBsize) {
4276 /*
4277 * Check for header wrapping around physical end-of-log
4278 */
4279 offset = hbp->b_addr;
4280 split_hblks = 0;
4281 wrapped_hblks = 0;
4282 if (blk_no + hblks <= log->l_logBBsize) {
4283 /* Read header in one read */
4284 error = xlog_bread(log, blk_no, hblks, hbp,
4285 &offset);
4286 if (error)
4287 goto bread_err2;
4288 } else {
4289 /* This LR is split across physical log end */
4290 if (blk_no != log->l_logBBsize) {
4291 /* some data before physical log end */
4292 ASSERT(blk_no <= INT_MAX);
4293 split_hblks = log->l_logBBsize - (int)blk_no;
4294 ASSERT(split_hblks > 0);
4295 error = xlog_bread(log, blk_no,
4296 split_hblks, hbp,
4297 &offset);
4298 if (error)
4299 goto bread_err2;
4300 }
4301
4302 /*
4303 * Note: this black magic still works with
4304 * large sector sizes (non-512) only because:
4305 * - we increased the buffer size originally
4306 * by 1 sector giving us enough extra space
4307 * for the second read;
4308 * - the log start is guaranteed to be sector
4309 * aligned;
4310 * - we read the log end (LR header start)
4311 * _first_, then the log start (LR header end)
4312 * - order is important.
4313 */
4314 wrapped_hblks = hblks - split_hblks;
4315 error = xlog_bread_offset(log, 0,
4316 wrapped_hblks, hbp,
4317 offset + BBTOB(split_hblks));
4318 if (error)
4319 goto bread_err2;
4320 }
4321 rhead = (xlog_rec_header_t *)offset;
4322 error = xlog_valid_rec_header(log, rhead,
4323 split_hblks ? blk_no : 0);
4324 if (error)
4325 goto bread_err2;
4326
4327 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4328 blk_no += hblks;
4329
4330 /* Read in data for log record */
4331 if (blk_no + bblks <= log->l_logBBsize) {
4332 error = xlog_bread(log, blk_no, bblks, dbp,
4333 &offset);
4334 if (error)
4335 goto bread_err2;
4336 } else {
4337 /* This log record is split across the
4338 * physical end of log */
4339 offset = dbp->b_addr;
4340 split_bblks = 0;
4341 if (blk_no != log->l_logBBsize) {
4342 /* some data is before the physical
4343 * end of log */
4344 ASSERT(!wrapped_hblks);
4345 ASSERT(blk_no <= INT_MAX);
4346 split_bblks =
4347 log->l_logBBsize - (int)blk_no;
4348 ASSERT(split_bblks > 0);
4349 error = xlog_bread(log, blk_no,
4350 split_bblks, dbp,
4351 &offset);
4352 if (error)
4353 goto bread_err2;
4354 }
4355
4356 /*
4357 * Note: this black magic still works with
4358 * large sector sizes (non-512) only because:
4359 * - we increased the buffer size originally
4360 * by 1 sector giving us enough extra space
4361 * for the second read;
4362 * - the log start is guaranteed to be sector
4363 * aligned;
4364 * - we read the log end (LR header start)
4365 * _first_, then the log start (LR header end)
4366 * - order is important.
4367 */
4368 error = xlog_bread_offset(log, 0,
4369 bblks - split_bblks, dbp,
4370 offset + BBTOB(split_bblks));
4371 if (error)
4372 goto bread_err2;
4373 }
4374
4375 error = xlog_unpack_data(rhead, offset, log);
4376 if (error)
4377 goto bread_err2;
4378
4379 error = xlog_recover_process_data(log, rhash,
4380 rhead, offset, pass);
4381 if (error)
4382 goto bread_err2;
4383 blk_no += bblks;
4384 }
4385
4386 ASSERT(blk_no >= log->l_logBBsize);
4387 blk_no -= log->l_logBBsize;
4388
4389 /* read first part of physical log */
4390 while (blk_no < head_blk) {
4391 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
4392 if (error)
4393 goto bread_err2;
4394
4395 rhead = (xlog_rec_header_t *)offset;
4396 error = xlog_valid_rec_header(log, rhead, blk_no);
4397 if (error)
4398 goto bread_err2;
4399
4400 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
4401 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
4402 &offset);
4403 if (error)
4404 goto bread_err2;
4405
4406 error = xlog_unpack_data(rhead, offset, log);
4407 if (error)
4408 goto bread_err2;
4409
4410 error = xlog_recover_process_data(log, rhash,
4411 rhead, offset, pass);
4412 if (error)
4413 goto bread_err2;
4414 blk_no += bblks + hblks;
4415 }
4416 }
4417
4418 bread_err2:
4419 xlog_put_bp(dbp);
4420 bread_err1:
4421 xlog_put_bp(hbp);
4422 return error;
4423 }
4424
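/*
 * Editorial aside: a compact sketch of the end-of-log wrap handling in
 * xlog_do_recovery_pass() above. A record straddling the physical end of
 * the log is read in two pieces - the tail piece first, then the wrapped
 * piece from block 0 - into one contiguous buffer. The helpers here are
 * hypothetical stand-ins for xlog_bread()/xlog_bread_offset().
 */
#include <string.h>

#define SKETCH_BBSIZE	512

/* pretend device: copy nbblks basic blocks starting at blk into dst */
static int sketch_bread(const char *dev, int blk, int nbblks, char *dst)
{
	memcpy(dst, dev + (size_t)blk * SKETCH_BBSIZE,
	       (size_t)nbblks * SKETCH_BBSIZE);
	return 0;
}

static int wrapped_read(const char *dev, int log_bbsize, int blk_no,
			int nbblks, char *dst)
{
	int split, error;

	if (blk_no + nbblks <= log_bbsize)	/* no wrap: one read */
		return sketch_bread(dev, blk_no, nbblks, dst);

	/* read the piece before the physical end of the log first... */
	split = log_bbsize - blk_no;
	error = sketch_bread(dev, blk_no, split, dst);
	if (error)
		return error;

	/* ...then the wrapped piece from block 0, appended after it */
	return sketch_bread(dev, 0, nbblks - split,
			    dst + (size_t)split * SKETCH_BBSIZE);
}
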
4425 /*
4426 * Do the recovery of the log. We actually do this in two phases.
4427  * The two passes are necessary in order to implement cancellation
4428  * of records written into the log. The first pass
4429 * determines those things which have been cancelled, and the
4430 * second pass replays log items normally except for those which
4431 * have been cancelled. The handling of the replay and cancellations
4432 * takes place in the log item type specific routines.
4433 *
4434 * The table of items which have cancel records in the log is allocated
4435 * and freed at this level, since only here do we know when all of
4436 * the log recovery has been completed.
4437 */
4438 STATIC int
4439 xlog_do_log_recovery(
4440 struct xlog *log,
4441 xfs_daddr_t head_blk,
4442 xfs_daddr_t tail_blk)
4443 {
4444 int error, i;
4445
4446 ASSERT(head_blk != tail_blk);
4447
4448 /*
4449 * First do a pass to find all of the cancelled buf log items.
4450 * Store them in the buf_cancel_table for use in the second pass.
4451 */
4452 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
4453 sizeof(struct list_head),
4454 KM_SLEEP);
4455 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4456 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
4457
4458 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4459 XLOG_RECOVER_PASS1);
4460 if (error != 0) {
4461 kmem_free(log->l_buf_cancel_table);
4462 log->l_buf_cancel_table = NULL;
4463 return error;
4464 }
4465 /*
4466 * Then do a second pass to actually recover the items in the log.
4467 * When it is complete free the table of buf cancel items.
4468 */
4469 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
4470 XLOG_RECOVER_PASS2);
4471 #ifdef DEBUG
4472 if (!error) {
4473 int i;
4474
4475 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
4476 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
4477 }
4478 #endif /* DEBUG */
4479
4480 kmem_free(log->l_buf_cancel_table);
4481 log->l_buf_cancel_table = NULL;
4482
4483 return error;
4484 }
4485
4486 /*
4487 * Do the actual recovery
4488 */
4489 STATIC int
4490 xlog_do_recover(
4491 struct xlog *log,
4492 xfs_daddr_t head_blk,
4493 xfs_daddr_t tail_blk)
4494 {
4495 int error;
4496 xfs_buf_t *bp;
4497 xfs_sb_t *sbp;
4498
4499 /*
4500 * First replay the images in the log.
4501 */
4502 error = xlog_do_log_recovery(log, head_blk, tail_blk);
4503 if (error)
4504 return error;
4505
4506 /*
4507 * If IO errors happened during recovery, bail out.
4508 */
4509 if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
4510 return (EIO);
4511 }
4512
4513 /*
4514 * We now update the tail_lsn since much of the recovery has completed
4515 	 * and there may be space available to use. If there were no extent
4516 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
4517 * be the last_sync_lsn. This was set in xlog_find_tail to be the
4518 * lsn of the last known good LR on disk. If there are extent frees
4519 * or iunlinks they will have some entries in the AIL; so we look at
4520 * the AIL to determine how to set the tail_lsn.
4521 */
4522 xlog_assign_tail_lsn(log->l_mp);
4523
4524 /*
4525 * Now that we've finished replaying all buffer and inode
4526 * updates, re-read in the superblock and reverify it.
4527 */
4528 bp = xfs_getsb(log->l_mp, 0);
4529 XFS_BUF_UNDONE(bp);
4530 ASSERT(!(XFS_BUF_ISWRITE(bp)));
4531 XFS_BUF_READ(bp);
4532 XFS_BUF_UNASYNC(bp);
4533 bp->b_ops = &xfs_sb_buf_ops;
4534 xfsbdstrat(log->l_mp, bp);
4535 error = xfs_buf_iowait(bp);
4536 if (error) {
4537 xfs_buf_ioerror_alert(bp, __func__);
4538 ASSERT(0);
4539 xfs_buf_relse(bp);
4540 return error;
4541 }
4542
4543 /* Convert superblock from on-disk format */
4544 sbp = &log->l_mp->m_sb;
4545 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
4546 ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
4547 ASSERT(xfs_sb_good_version(sbp));
4548 xfs_buf_relse(bp);
4549
4550 /* We've re-read the superblock so re-initialize per-cpu counters */
4551 xfs_icsb_reinit_counters(log->l_mp);
4552
4553 xlog_recover_check_summary(log);
4554
4555 /* Normal transactions can now occur */
4556 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
4557 return 0;
4558 }
4559
4560 /*
4561 * Perform recovery and re-initialize some log variables in xlog_find_tail.
4562 *
4563 * Return error or zero.
4564 */
4565 int
4566 xlog_recover(
4567 struct xlog *log)
4568 {
4569 xfs_daddr_t head_blk, tail_blk;
4570 int error;
4571
4572 /* find the tail of the log */
4573 if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
4574 return error;
4575
4576 if (tail_blk != head_blk) {
4577 /* There used to be a comment here:
4578 *
4579 * disallow recovery on read-only mounts. note -- mount
4580 * checks for ENOSPC and turns it into an intelligent
4581 * error message.
4582 * ...but this is no longer true. Now, unless you specify
4583 * NORECOVERY (in which case this function would never be
4584 * called), we just go ahead and recover. We do this all
4585 * under the vfs layer, so we can get away with it unless
4586 * the device itself is read-only, in which case we fail.
4587 */
4588 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
4589 return error;
4590 }
4591
4592 /*
4593 * Version 5 superblock log feature mask validation. We know the
4594 * log is dirty so check if there are any unknown log features
4595 * in what we need to recover. If there are unknown features
4596 		 * (e.g. unsupported transactions), then simply reject the
4597 * attempt at recovery before touching anything.
4598 */
4599 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
4600 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
4601 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
4602 xfs_warn(log->l_mp,
4603 "Superblock has unknown incompatible log features (0x%x) enabled.\n"
4604 "The log can not be fully and/or safely recovered by this kernel.\n"
4605 "Please recover the log on a kernel that supports the unknown features.",
4606 (log->l_mp->m_sb.sb_features_log_incompat &
4607 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
4608 return EINVAL;
4609 }
4610
4611 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
4612 log->l_mp->m_logname ? log->l_mp->m_logname
4613 : "internal");
4614
4615 error = xlog_do_recover(log, head_blk, tail_blk);
4616 log->l_flags |= XLOG_RECOVERY_NEEDED;
4617 }
4618 return error;
4619 }
4620
4621 /*
4622 * In the first part of recovery we replay inodes and buffers and build
4623 * up the list of extent free items which need to be processed. Here
4624 * we process the extent free items and clean up the on disk unlinked
4625 * inode lists. This is separated from the first part of recovery so
4626 * that the root and real-time bitmap inodes can be read in from disk in
4627 * between the two stages. This is necessary so that we can free space
4628 * in the real-time portion of the file system.
4629 */
4630 int
4631 xlog_recover_finish(
4632 struct xlog *log)
4633 {
4634 /*
4635 * Now we're ready to do the transactions needed for the
4636 * rest of recovery. Start with completing all the extent
4637 * free intent records and then process the unlinked inode
4638 * lists. At this point, we essentially run in normal mode
4639 * except that we're still performing recovery actions
4640 * rather than accepting new requests.
4641 */
4642 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
4643 int error;
4644 error = xlog_recover_process_efis(log);
4645 if (error) {
4646 xfs_alert(log->l_mp, "Failed to recover EFIs");
4647 return error;
4648 }
4649 /*
4650 * Sync the log to get all the EFIs out of the AIL.
4651 * This isn't absolutely necessary, but it helps in
4652 * case the unlink transactions would have problems
4653 * pushing the EFIs out of the way.
4654 */
4655 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
4656
4657 xlog_recover_process_iunlinks(log);
4658
4659 xlog_recover_check_summary(log);
4660
4661 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
4662 log->l_mp->m_logname ? log->l_mp->m_logname
4663 : "internal");
4664 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
4665 } else {
4666 xfs_info(log->l_mp, "Ending clean mount");
4667 }
4668 return 0;
4669 }
4670
4671
4672 #if defined(DEBUG)
4673 /*
4674 * Read all of the agf and agi counters and check that they
4675 * are consistent with the superblock counters.
4676 */
4677 void
4678 xlog_recover_check_summary(
4679 struct xlog *log)
4680 {
4681 xfs_mount_t *mp;
4682 xfs_agf_t *agfp;
4683 xfs_buf_t *agfbp;
4684 xfs_buf_t *agibp;
4685 xfs_agnumber_t agno;
4686 __uint64_t freeblks;
4687 __uint64_t itotal;
4688 __uint64_t ifree;
4689 int error;
4690
4691 mp = log->l_mp;
4692
4693 freeblks = 0LL;
4694 itotal = 0LL;
4695 ifree = 0LL;
4696 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
4697 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
4698 if (error) {
4699 xfs_alert(mp, "%s agf read failed agno %d error %d",
4700 __func__, agno, error);
4701 } else {
4702 agfp = XFS_BUF_TO_AGF(agfbp);
4703 freeblks += be32_to_cpu(agfp->agf_freeblks) +
4704 be32_to_cpu(agfp->agf_flcount);
4705 xfs_buf_relse(agfbp);
4706 }
4707
4708 error = xfs_read_agi(mp, NULL, agno, &agibp);
4709 if (error) {
4710 xfs_alert(mp, "%s agi read failed agno %d error %d",
4711 __func__, agno, error);
4712 } else {
4713 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
4714
4715 itotal += be32_to_cpu(agi->agi_count);
4716 ifree += be32_to_cpu(agi->agi_freecount);
4717 xfs_buf_relse(agibp);
4718 }
4719 }
4720 }
4721 #endif /* DEBUG */