/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);

#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};
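
#if 0	/* illustrative sketch only; not built */
/*
 * A minimal model of how cancel records are found: the table is a chained
 * hash keyed on the buffer's starting block number.  The real bucket lookup
 * is the XLOG_BUF_CANCEL_BUCKET() macro; the table size used here is a
 * made-up example value.
 */
#define EXAMPLE_BC_TABLE_SIZE	64

static struct list_head *
example_buf_cancel_bucket(
	struct list_head	*table,
	xfs_daddr_t		blkno)
{
	return &table[(unsigned long long)blkno % EXAMPLE_BC_TABLE_SIZE];
}
#endif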
/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the given count of basic blocks is valid number of blocks
 * to specify for an operation involving the given XFS log buffer.
 * Returns nonzero if the count is valid, 0 otherwise.
 */
static inline int
xlog_buf_bbcount_valid(
	struct xlog	*log,
	int		bbcount)
{
	return bbcount > 0 && bbcount <= log->l_logBBsize;
}
/*
 * Allocate a buffer to hold log data.  The buffer needs to be able
 * to map to a range of nbblks basic blocks at any valid (basic
 * block) offset within the log.
 */
STATIC xfs_buf_t *
xlog_get_bp(
	struct xlog	*log,
	int		nbblks)
{
	struct xfs_buf	*bp;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2
	 * multiple of the basic block size), so we round up the
	 * requested size to accommodate the basic blocks required
	 * for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-
	 * aligned block offset, in which case an I/O of the
	 * requested size could extend beyond the end of the
	 * buffer.  If the requested size is only 1 basic block it
	 * will never straddle a sector boundary, so this won't be
	 * an issue.  Nor will this be a problem if the log I/O is
	 * done in basic blocks (sector size 1).  But otherwise we
	 * extend the buffer by one extra log sector to ensure
	 * there's space to accommodate this possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);

	bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, nbblks, 0);
	if (bp)
		xfs_buf_unlock(bp);
	return bp;
}

STATIC void
xlog_put_bp(
	xfs_buf_t	*bp)
{
	xfs_buf_free(bp);
}
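
#if 0	/* illustrative sketch only; not built */
/*
 * A minimal model of the sizing rule above, assuming a hypothetical log
 * sector of 8 basic blocks (4k): a 5-block request first gains one sector
 * of misalignment slack (5 + 8 = 13) and is then rounded up to 16 basic
 * blocks, so a non-sector-aligned 5-block I/O can never run off the end
 * of the buffer.
 */
static int
example_log_buf_size(
	int	nbblks,
	int	sect_bb)	/* power-of-2 sector size in basic blocks */
{
	if (nbblks > 1 && sect_bb > 1)
		nbblks += sect_bb;			/* misalignment slack */
	return (nbblks + sect_bb - 1) & ~(sect_bb - 1);	/* round_up() */
}
#endif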
/*
 * Return the address of the start of the given block number's data
 * in a log buffer.  The buffer covers a log sector-aligned region.
 */
STATIC xfs_caddr_t
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	xfs_daddr_t	offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1);

	ASSERT(offset + nbblks <= bp->b_length);
	return bp->b_addr + BBTOB(offset);
}
/*
 * nbblks should be uint, but oh well.  Just want to catch that 32-bit length.
 */
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_READ(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_buf_submit_wait(bp);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
		xfs_buf_ioerror_alert(bp, __func__);
	return error;
}
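
#if 0	/* illustrative sketch only; not built */
/*
 * A worked model of the alignment above, assuming 8-basic-block log
 * sectors: a 3-block read starting at block 5 is issued as blocks [0, 8),
 * and xlog_align() later returns the address of block 5 within that
 * sector-aligned buffer.
 */
static void
example_align_io(
	int	blk_no,
	int	nbblks,
	int	sect_bb,
	int	*io_start,
	int	*io_len)
{
	*io_start = blk_no & ~(sect_bb - 1);			/* round_down() */
	*io_len = (nbblks + sect_bb - 1) & ~(sect_bb - 1);	/* round_up() */
}
#endif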
STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int		error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);
	if (error)
		return error;

	*offset = xlog_align(log, blk_no, nbblks, bp);
	return error;
}
/*
 * Read at an offset into the buffer. Returns with the buffer in its original
 * state regardless of the result of the read.
 */
STATIC int
xlog_bread_offset(
	struct xlog	*log,
	xfs_daddr_t	blk_no,		/* block to read from */
	int		nbblks,		/* blocks to read */
	struct xfs_buf	*bp,
	xfs_caddr_t	offset)
{
	xfs_caddr_t	orig_offset = bp->b_addr;
	int		orig_len = BBTOB(bp->b_length);
	int		error, error2;

	error = xfs_buf_associate_memory(bp, offset, BBTOB(nbblks));
	if (error)
		return error;

	error = xlog_bread_noalign(log, blk_no, nbblks, bp);

	/* must reset buffer pointer even on error */
	error2 = xfs_buf_associate_memory(bp, orig_offset, orig_len);
	if (error)
		return error;
	return error2;
}
/*
 * Write out the buffer at the given block for the given number of blocks.
 * The buffer is kept locked across the write and is returned locked.
 * This can only be used for synchronous log writes.
 */
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp)
{
	int		error;

	if (!xlog_buf_bbcount_valid(log, nbblks)) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);

	ASSERT(nbblks > 0);
	ASSERT(nbblks <= bp->b_length);

	XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
	XFS_BUF_ZEROFLAGS(bp);
	xfs_buf_hold(bp);
	xfs_buf_lock(bp);
	bp->b_io_length = nbblks;
	bp->b_error = 0;

	error = xfs_bwrite(bp);
	if (error)
		xfs_buf_ioerror_alert(bp, __func__);
	xfs_buf_relse(bp);
	return error;
}
#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif
/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(1)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_recover(2)",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_nil(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is nil, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "nil uuid in log - IRIX style log");
	} else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		XFS_ERROR_REPORT("xlog_header_check_mount",
				 XFS_ERRLEVEL_HIGH, mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
STATIC void
xlog_recover_iodone(
	struct xfs_buf	*bp)
{
	if (bp->b_error) {
		/*
		 * We're not going to bother about retrying
		 * this during recovery. One strike!
		 */
		if (!XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
			xfs_buf_ioerror_alert(bp, __func__);
			xfs_force_shutdown(bp->b_target->bt_mount,
						SHUTDOWN_META_IO_ERROR);
		}
	}
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}
/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm can not be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	struct xfs_buf	*bp,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	xfs_caddr_t	offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, bp, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
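
#if 0	/* illustrative sketch only; not built */
/*
 * The same bisection on a plain array of cycle numbers, assuming the
 * precondition the caller establishes: cycles[first] holds the old cycle
 * and cycles[last] holds new_cycle.  Each step preserves that invariant,
 * so the loop converges on the (approximate) first new_cycle block.
 */
static int
example_find_cycle_start(
	const unsigned int	*cycles,
	int			first,	/* known old-cycle index */
	int			last,	/* known new-cycle index */
	unsigned int		new_cycle)
{
	while (first + 1 < last) {
		int	mid = (first + last) >> 1;	/* BLK_AVG() */

		if (cycles[mid] == new_cycle)
			last = mid;	/* answer is at or left of mid */
		else
			first = mid;	/* answer is right of mid */
	}
	return last;
}
#endif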
/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	xfs_buf_t	*bp;
	xfs_daddr_t	bufblks;
	xfs_caddr_t	buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, bp, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	xlog_put_bp(bp);
	return error;
}
/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	xfs_buf_t		*bp;
	xfs_caddr_t		offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	if (!(bp = xlog_get_bp(log, num_blks))) {
		if (!(bp = xlog_get_bp(log, 1)))
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, bp, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EIO;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head.  So we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		uint	h_size = be32_to_cpu(head->h_size);

		xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE;
		if (h_size % XLOG_HEADER_CYCLE_SIZE)
			xhdrs++;
	} else {
		xhdrs = 1;
	}

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	xlog_put_bp(bp);
	return error;
}
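
#if 0	/* illustrative sketch only; not built */
/*
 * Worked model of the v2 header-count calculation above: a v2 log record
 * carries one header block per XLOG_HEADER_CYCLE_SIZE (32k) of the record
 * buffer size (h_size), rounded up, so a 40k h_size needs two header
 * blocks.  The constant is hardcoded here purely for illustration.
 */
static int
example_xlog_header_blocks(
	unsigned int	h_size)
{
	int	xhdrs = h_size / 32768;		/* XLOG_HEADER_CYCLE_SIZE */

	if (h_size % 32768)
		xhdrs++;
	return xhdrs;
}
#endif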
/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LR have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't changed through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		if ((error = xlog_find_cycle_start(log, bp, first_blk,
						&head_blk, last_half_cycle)))
			goto bp_err;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto bp_err;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto bp_err;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto bp_err;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto bp_err;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum - start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto bp_err;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto bp_err;
	}

	xlog_put_bp(bp);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

 bp_err:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}
/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk.  Every log record header has
 * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
 * to get a sync block number.  The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn.  The entire log record does not need to be valid.  We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	xlog_op_header_t	*op_head;
	xfs_caddr_t		offset = NULL;
	xfs_buf_t		*bp;
	int			error, i, found;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	xfs_lsn_t		tail_lsn;
	int			hblks;

	found = 0;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;

	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	if (*head_blk == 0) {				/* special case */
		error = xlog_bread(log, 0, 1, bp, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards looking for log record header block
	 */
	ASSERT(*head_blk < INT_MAX);
	for (i = (int)(*head_blk) - 1; i >= 0; i--) {
		error = xlog_bread(log, i, 1, bp, &offset);
		if (error)
			goto done;

		if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			found = 1;
			break;
		}
	}
	/*
	 * If we haven't found the log record header block, start looking
	 * again from the end of the physical log.  XXXmiken: There should be
	 * a check here to make sure we didn't search more than N blocks in
	 * the previous code.
	 */
	if (!found) {
		for (i = log->l_logBBsize - 1; i >= (int)(*head_blk); i--) {
			error = xlog_bread(log, i, 1, bp, &offset);
			if (error)
				goto done;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				found = 2;
				break;
			}
		}
	}
	if (!found) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		xlog_put_bp(bp);
		ASSERT(0);
		return -EIO;
	}

	/* find blk_no of tail of log */
	rhead = (xlog_rec_header_t *)offset;
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Reset log values according to the state of the log when we
	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
	 * one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record.  At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for.  Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
	log->l_prev_block = i;
	log->l_curr_block = (int)*head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (found == 2)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));

	/*
	 * Look for unmount record.  If we find it, then we know there
	 * was a clean unmount.  Since 'i' could be the last block in
	 * the physical log, we convert to a log block before comparing
	 * to the head_blk.
	 *
	 * Save the current tail lsn to use to pass to
	 * xlog_clear_stale_blocks() below.  We won't want to clear the
	 * unmount record if there is one, so we pass the lsn of the
	 * unmount record rather than the block after it.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rhead->h_size);
		int	h_version = be32_to_cpu(rhead->h_version);

		if ((h_version & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
		} else {
			hblks = 1;
		}
	} else {
		hblks = 1;
	}
	after_umount_blk = (i + hblks + (int)
		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
	tail_lsn = atomic64_read(&log->l_tail_lsn);
	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = (i + hblks) % log->l_logBBsize;
		error = xlog_bread(log, umount_data_blk, 1, bp, &offset);
		if (error)
			goto done;

		op_head = (xlog_op_header_t *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written
			 * log records will point recovery to after the
			 * current unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			/*
			 * Note that the unmount was clean. If the unmount
			 * was not clean, we need to know this to rebuild the
			 * superblock counters from the perag headers if we
			 * have a filesystem using non-persistent counters.
			 */
			log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
		}
	}

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head.  This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_mp->m_logdev_targp))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	xlog_put_bp(bp);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}
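
#if 0	/* illustrative sketch only; not built */
/*
 * Worked model of the unmount-record position check above, with made-up
 * numbers: a record header found at block i = 990 in a 1000-block log,
 * with one header block and a 16-block record body, puts the block after
 * the record at (990 + 1 + 16) % 1000 = 7.  Only when the head sits
 * exactly there, and the record holds a single operation, can it be a
 * clean unmount record.
 */
static int
example_after_umount_blk(
	int	i,		/* block of the record header */
	int	hblks,		/* header blocks */
	int	len_bblks,	/* BTOBB(h_len) */
	int	log_bbsize)
{
	return (i + hblks + len_bblks) % log_bbsize;
}
#endif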
/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough.  You can then search linearly through
 * the X blocks.  This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0.  It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1 => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	xfs_buf_t	*bp;
	xfs_caddr_t	offset;
	uint	        first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t     num_scan_bblks;
	int	        error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	bp = xlog_get_bp(log, 1);
	if (!bp)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, bp, &offset);
	if (error)
		goto bp_err;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		xlog_put_bp(bp);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, bp, &offset);
	if (error)
		goto bp_err;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		xlog_put_bp(bp);
		return 0;
	} else if (first_cycle != 1) {
		/*
		 * If the cycle of the last block is zero, the cycle of
		 * the first block must be 1. If it's not, maybe we're
		 * not looking at a log... Bail out.
		 */
		xfs_warn(log->l_mp,
			"Log inconsistent or not a log (last==0, first!=1)");
		error = -EINVAL;
		goto bp_err;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	if ((error = xlog_find_cycle_start(log, bp, 0, &last_blk, 0)))
		goto bp_err;

	/*
	 * Validate the answer.  Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks.  At this point, the maximum
	 * is not chosen to mean anything special.   XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head.  What we're trying to detect is
	 *        1 ... | 0 | 1 | 0...
	 *                       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto bp_err;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write.  We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto bp_err;

	*blk_no = last_blk;
bp_err:
	xlog_put_bp(bp);
	if (error)
		return error;
	return 1;
}
/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	xfs_caddr_t		buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}
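
#if 0	/* illustrative sketch only; not built */
/*
 * A minimal model of the LSN packing used by xlog_assign_lsn() above:
 * the cycle number lives in the high 32 bits and the basic block number
 * in the low 32 bits, which is why CYCLE_LSN()/BLOCK_LSN() can split an
 * LSN with a shift and a mask.
 */
static unsigned long long
example_assign_lsn(
	unsigned int	cycle,
	unsigned int	block)
{
	return ((unsigned long long)cycle << 32) | block;
}
#endif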
STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	xfs_caddr_t	offset;
	xfs_buf_t	*bp;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written.  If that fails, try
	 * a smaller size.  We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(bp = xlog_get_bp(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, bp);
		if (error)
			goto out_put_bp;

		j = start_block - balign;
	}

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			offset = bp->b_addr + BBTOB(ealign - start_block);
			error = xlog_bread_offset(log, ealign, sectbb,
							bp, offset);
			if (error)
				break;

		}

		offset = xlog_align(log, start_block, endcount, bp);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, bp);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

 out_put_bp:
	xlog_put_bp(bp);
	return error;
}
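
#if 0	/* illustrative sketch only; not built */
/*
 * Worked model of the read-modify-write bounds above, assuming 8-block
 * sectors: writing blocks [5, 12) gives balign = 0, so the head sector
 * is pre-read to preserve blocks 0-4, and ealign = 8, so the tail sector
 * is pre-read to preserve blocks 12-15 before the write goes out.
 */
static void
example_rmw_bounds(
	int	start_block,
	int	end_block,
	int	sectbb,
	int	*balign,
	int	*ealign)
{
	*balign = start_block & ~(sectbb - 1);	/* head sector to pre-read */
	*ealign = end_block & ~(sectbb - 1);	/* tail sector to pre-read */
}
#endif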
/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head.  We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number.  We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log.  Doing so would leave the log without
 * any valid log records in it until a new one was written.  If we crashed
 * during that time we would not be able to recover.
 */
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail.  We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) {
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){
			XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)",
					 XFS_ERRLEVEL_LOW, log->l_mp);
			return -EFSCORRUPTED;
		}
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = MIN(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log.  Just do it
		 * in a single write.  Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *     n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks.  Do it in two separate
		 * I/Os.  The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *    n ... n ... | n - 1 ...
		 *    ^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}
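
#if 0	/* illustrative sketch only; not built */
/*
 * Worked model of the distance calculation above, with made-up numbers
 * for a 1000-block log: head at block 900 and tail at block 100 on the
 * same cycle gives 100 + (1000 - 900) = 200 clearable blocks; head at
 * block 100 on cycle n+1 with tail at block 900 on cycle n gives
 * 900 - 100 = 800.
 */
static int
example_tail_distance(
	int	head_cycle,
	int	head_block,
	int	tail_cycle,
	int	tail_block,
	int	log_bbsize)
{
	if (head_cycle == tail_cycle)
		return tail_block + (log_bbsize - head_block);
	return tail_block - head_block;	/* head_cycle == tail_cycle + 1 */
}
#endif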
/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	xlog_recover_item_t	*item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(inode_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;

		switch (ITEM_TYPE(item)) {
		case XFS_LI_ICREATE:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_BUF:
			if (buf_f->blf_flags & XFS_BLF_CANCEL) {
				trace_xfs_log_recover_item_reorder_head(log,
							trans, item, pass);
				list_move(&item->ri_list, &cancel_list);
				break;
			}
			if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
				list_move(&item->ri_list, &inode_buffer_list);
				break;
			}
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XFS_LI_INODE:
		case XFS_LI_DQUOT:
		case XFS_LI_QUOTAOFF:
		case XFS_LI_EFD:
		case XFS_LI_EFI:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &inode_list);
			break;
		default:
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation",
				__func__);
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EIO;
			goto out;
		}
	}
out:
	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&inode_list))
		list_splice_tail(&inode_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}
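
#if 0	/* illustrative sketch only; not built */
/*
 * A minimal model of the net effect of the splices above: a four-way
 * bucket sort into replay order.  Class 0 stands for regular/allocation
 * buffers, 1 for non-buffer items, 2 for inode unlink buffers and 3 for
 * cancelled buffers.  This model keeps relative order within every class;
 * the real code only preserves it for classes 0 and 1, which, as the
 * comment above notes, is all that matters.
 */
static void
example_reorder(
	const int	*class,		/* class[i] in 0..3 */
	int		nitems,
	int		*replay_order)	/* out: item indices in replay order */
{
	int	c, i, k = 0;

	for (c = 0; c <= 3; c++)
		for (i = 0; i < nitems; i++)
			if (class[i] == c)
				replay_order[k++] = i;
}
#endif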
/*
 * Build up the table of buf cancel records so that we don't replay
 * cancelled data in the second pass.  For buffer records that are
 * not cancel records, there is nothing to do here so we just return.
 *
 * If we get a cancel record which is already in the table, this indicates
 * that the buffer was cancelled multiple times.  In order to ensure
 * that during pass 2 we keep the record in the table until we reach its
 * last occurrence in the log, we keep a reference count in the cancel
 * record in the table to tell us how many times we expect to see this
 * record during the second pass.
 */
STATIC int
xlog_recover_buffer_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	/*
	 * If this isn't a cancel buffer item, then just return.
	 */
	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
		return 0;
	}

	/*
	 * Insert an xfs_buf_cancel record into the hash table of them.
	 * If there is already an identical record, bump its reference count.
	 */
	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == buf_f->blf_blkno &&
		    bcp->bc_len == buf_f->blf_len) {
			bcp->bc_refcount++;
			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
			return 0;
		}
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
	bcp->bc_blkno = buf_f->blf_blkno;
	bcp->bc_len = buf_f->blf_len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, bucket);

	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
	return 0;
}
/*
 * Check to see whether the buffer being recovered has a corresponding
 * entry in the buffer cancel record table. If it is, return the cancel
 * buffer structure to the caller.
 */
STATIC struct xfs_buf_cancel *
xlog_peek_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table) {
		/* empty table means no cancelled buffers in the log */
		ASSERT(!(flags & XFS_BLF_CANCEL));
		return NULL;
	}

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	/*
	 * We didn't find a corresponding entry in the table, so return 0 so
	 * that the buffer is NOT cancelled.
	 */
	ASSERT(!(flags & XFS_BLF_CANCEL));
	return NULL;
}
/*
 * If the buffer is being cancelled then return 1 so that it will be cancelled,
 * otherwise return 0.  If the buffer is actually a buffer cancel item
 * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
 * table and remove it from the table if this is the last reference.
 *
 * We remove the cancel record from the table when we encounter its last
 * occurrence in the log so that if the same buffer is re-used again after its
 * last cancellation we actually replay the changes made at that point.
 */
STATIC int
xlog_check_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	ushort			flags)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
	if (!bcp)
		return 0;

	/*
	 * We've got a match, so return 1 so that the recovery of this buffer
	 * is cancelled.  If this buffer is actually a buffer cancel log
	 * item, then decrement the refcount on the one in the table and
	 * remove it if this is the last reference.
	 */
	if (flags & XFS_BLF_CANCEL) {
		if (--bcp->bc_refcount == 0) {
			list_del(&bcp->bc_list);
			kmem_free(bcp);
		}
	}
	return 1;
}
/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			item_index = 0;
	int			bit = 0;
	int			nbits = 0;
	int			reg_buf_offset = 0;
	int			reg_buf_bytes = 0;
	int			next_unlinked_offset;
	int			inodes_per_buf;
	xfs_agino_t		*logged_nextp;
	xfs_agino_t		*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <=
							BBTOB(bp->b_io_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (unlikely(*logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			XFS_ERROR_REPORT("xlog_recover_do_inode_buf",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp,
					      next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp, (struct xfs_dinode *)
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));

	}

	return 0;
}
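
#if 0	/* illustrative sketch only; not built */
/*
 * A minimal model of the dirty-bitmap geometry used above: each bit in
 * blf_data_map covers one XFS_BLF_CHUNK (128-byte) region of the buffer,
 * so a run of nbits starting at bit maps to a byte range via a shift by
 * XFS_BLF_SHIFT (7).  The shift is hardcoded here purely for illustration.
 */
static void
example_logged_region(
	int	bit,		/* first set bit of the run */
	int	nbits,		/* length of the run in bits */
	int	*offset,	/* out: byte offset of logged region */
	int	*bytes)		/* out: byte length of logged region */
{
	*offset = bit << 7;	/* XFS_BLF_SHIFT */
	*bytes = nbits << 7;
}
#endif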
/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer contents
 * temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
 * extract the LSN of the existing object in the buffer based on its current
 * magic number.  If we don't recognise the magic number in the buffer, then
 * return a LSN of -1 so that the caller knows it was an unrecognised block and
 * so can recover the buffer.
 *
 * Note: we cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's LSN and compare it to that
 * which we read from the superblock. If the UUIDs don't match, then we've got a
 * stale metadata block from an old filesystem instance that we need to recover
 * over the top of.
 */
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;

	/* v4 filesystems always recover immediately */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn);
		uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid;
		break;
	case XFS_SB_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes have
	 * been modified, and there is no global buffer LSN. Hence we need to
	 * recover all inode buffer types immediately. This problem will be
	 * fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}
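
#if 0	/* illustrative sketch only; not built */
/*
 * A minimal model of how a caller is expected to use the LSN returned
 * above: -1 means unrecognised contents, so replay unconditionally;
 * otherwise replay only when the change being recovered is strictly
 * newer than what is already on disk.
 */
static int
example_should_replay(
	long long	disk_lsn,	/* from xlog_recover_get_buf_lsn() */
	long long	item_lsn)	/* LSN of the change being replayed */
{
	if (disk_lsn == -1)
		return 1;
	return disk_lsn < item_lsn;	/* disk already up to date otherwise */
}
#endif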
/*
 * Validate the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to them for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;
	__uint32_t		magic32;
	__uint16_t		magic16;
	__uint16_t		magicda;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be able
	 * to determine if we should have replayed the item. If we replay old
	 * metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_allocbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_FIBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		default:
			xfs_warn(mp, "Bad btree block magic!");
			ASSERT(0);
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			xfs_warn(mp, "Bad AGF block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			xfs_warn(mp, "Bad AGFL block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			xfs_warn(mp, "Bad AGI block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			xfs_warn(mp, "Bad DQUOT block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			xfs_warn(mp, "Bad INODE block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			xfs_warn(mp, "Bad symlink block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			xfs_warn(mp, "Bad dir block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			xfs_warn(mp, "Bad dir data magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			xfs_warn(mp, "Bad dir3 free magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			xfs_warn(mp, "Bad dir leaf1 magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			xfs_warn(mp, "Bad dir leafn magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			xfs_warn(mp, "Bad da node magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			xfs_warn(mp, "Bad attr leaf magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			xfs_warn(mp, "Bad attr remote magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			xfs_warn(mp, "Bad SB block magic!");
			ASSERT(0);
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}
}
/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount	*mp,
	xlog_recover_item_t	*item,
	struct xfs_buf		*bp,
	xfs_buf_log_format_t	*buf_f)
{
	int			i;
	int			bit;
	int			nbits;
	int			error;

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_io_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing into
		 * the log. Hence we need to trim nbits back to the length of
		 * the current region being copied out of the log.
		 */
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXXThis is
		 * probably a good thing to do for other buf types also.
		 */
		error = 0;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			error = xfs_dqcheck(mp, item->ri_buf[i].i_addr,
					    -1, 0, XFS_QMOPT_DOWARN,
					    "dquot_buf_recover");
			if (error)
				goto next;
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits<<XFS_BLF_SHIFT);		/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f);
}
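
/*
 * Illustrative sketch (not part of the original source): how a bit range in
 * blf_data_map translates to the byte range used by the memcpy() above. Each
 * map bit covers one XFS_BLF_CHUNK (1 << XFS_BLF_SHIFT) bytes of the buffer,
 * so a run of nbits starting at 'bit' describes the region computed below.
 * The helper name is hypothetical and exists only to make the arithmetic
 * explicit.
 */
static inline void
example_blf_region(uint bit, uint nbits, uint *offset, uint *length)
{
	*offset = bit << XFS_BLF_SHIFT;		/* first byte of the region */
	*length = nbits << XFS_BLF_SHIFT;	/* region size in bytes */
}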
/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (ie. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer to
 * indicate to the caller if the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQ_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQ_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQ_GROUP;

	/*
	 * This type of quotas was turned off, so ignore this buffer
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
	return true;
}
/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently.  Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields.  This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery.  During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table.  See xlog_recover_buffer_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
STATIC int
xlog_recover_buffer_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	int			error;
	uint			buf_flags;
	xfs_lsn_t		lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		trace_xfs_log_recover_buf_cancel(log, buf_f);
		return 0;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, NULL);
	if (!bp)
		return -ENOMEM;
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)");
		goto out_release;
	}

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN we found in
	 * the buffer is more recent than the current transaction, then we
	 * need to attach the verifier directly. Failure to do so means that
	 * future recovery actions (e.g. EFI and unlinked list recovery) can
	 * operate on the buffers without the verifier attached. This can lead
	 * to blocks on disk having the correct content but a stale or invalid
	 * verifier, leaving them unprotected against corruption.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recover turns that
	 * buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		xlog_recover_validate_buf_type(mp, bp, buf_f);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f);
	}

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or mp->m_inode_cluster_size bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size.  Regardless, if the
	 * inode buffer size isn't MAX(blocksize, mp->m_inode_cluster_size)
	 * for *our* value of mp->m_inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_io_length) != MAX(log->l_mp->m_sb.sb_blocksize,
			(__uint32_t)log->l_mp->m_inode_cluster_size))) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_target->bt_mount == mp);
		bp->b_iodone = xlog_recover_iodone;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
}
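
/*
 * Illustrative sketch (not part of the original source): the LSN gate
 * applied above. A metadata object already stamped with an LSN at or beyond
 * that of the transaction being replayed is at least as new as the log copy,
 * so replay is skipped. The helper name is hypothetical.
 */
static inline bool
example_needs_replay(xfs_lsn_t obj_lsn, xfs_lsn_t current_lsn)
{
	/* no LSN (0) or an invalid one (-1) means we can't tell: replay */
	if (obj_lsn == 0 || obj_lsn == (xfs_lsn_t)-1)
		return true;
	/* replay only if the on-disk object predates this transaction */
	return XFS_LSN_CMP(obj_lsn, current_lsn) < 0;
}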
/*
 * Inode fork owner changes
 *
 * If we have been told that we have to reparent the inode fork, it's because an
 * extent swap operation on a CRC enabled filesystem has been done and we are
 * replaying it. We need to walk the BMBT of the appropriate fork and change the
 * owners of the recorded blocks to match the inode being replayed.
 *
 * The complexity here is that we don't have an inode context to work with, so
 * after we've replayed the inode we need to instantiate one. This is where the
 * fun begins.
 *
 * We are in the middle of log recovery, so we can't run transactions. That
 * means we cannot use cache coherent inode instantiation via xfs_iget(), as
 * that will result in the corresponding iput() running the inode through
 * xfs_inactive(). If we've just replayed an inode core that changes the link
 * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
 * transactions (bad!).
 *
 * So, to avoid this, we instantiate an inode directly from the inode core we've
 * just recovered. We have the buffer still locked, and all we really need to
 * instantiate is the inode core and the forks being modified. We can do this
 * manually, then run the inode btree owner change, and then tear down the
 * xfs_inode without having to run any transactions at all.
 *
 * Also, because we don't have a transaction context available here, we pass
 * the buffer_list for the operation to use to gather all the buffers we
 * modify for writeback.
 */
STATIC int
xfs_recover_inode_owner_change(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip,
	struct xfs_inode_log_format *in_f,
	struct list_head	*buffer_list)
{
	struct xfs_inode	*ip;
	int			error;

	ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));

	ip = xfs_inode_alloc(mp, in_f->ilf_ino);
	if (!ip)
		return -ENOMEM;

	/* instantiate the inode */
	xfs_dinode_from_disk(&ip->i_d, dip);
	ASSERT(ip->i_d.di_version >= 3);

	error = xfs_iformat_fork(ip, dip);
	if (error)
		goto out_free_ip;

	if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

	if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
		ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
		error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
					      ip->i_ino, buffer_list);
		if (error)
			goto out_free_ip;
	}

out_free_ip:
	xfs_inode_free(ip);
	return error;
}
STATIC int
xlog_recover_inode_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_inode_log_format_t	*in_f;
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	xfs_dinode_t		*dip;
	int			len;
	xfs_caddr_t		src;
	xfs_caddr_t		dest;
	int			error;
	int			attr_index;
	uint			fields;
	xfs_icdinode_t		*dicp;
	uint			isize;
	int			need_free = 0;

	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
		in_f = item->ri_buf[0].i_addr;
	} else {
		in_f = kmem_alloc(sizeof(xfs_inode_log_format_t), KM_SLEEP);
		need_free = 1;
		error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
		if (error)
			goto error;
	}

	/*
	 * Inode buffers can be freed, look out for it,
	 * and do not replay the inode.
	 */
	if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
					in_f->ilf_len, 0)) {
		error = 0;
		trace_xfs_log_recover_inode_cancel(log, in_f);
		goto error;
	}
	trace_xfs_log_recover_inode_recover(log, in_f);

	bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0,
			  &xfs_inode_buf_ops);
	if (!bp) {
		error = -ENOMEM;
		goto error;
	}
	error = bp->b_error;
	if (error) {
		xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#2)");
		goto out_release;
	}
	ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, in_f->ilf_boffset);

	/*
	 * Make sure the place we're flushing out to really looks
	 * like an inode!
	 */
	if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
		xfs_alert(mp,
	"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
			__func__, dip, bp, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
				 XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	dicp = item->ri_buf[1].i_addr;
	if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
		xfs_alert(mp,
			"%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
			__func__, item, in_f->ilf_ino);
		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
				 XFS_ERRLEVEL_LOW, mp);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/*
	 * If the inode has an LSN in it, recover the inode only if it's less
	 * than the lsn of the transaction we are replaying. Note: we still
	 * need to replay an owner change even though the inode is more recent
	 * than the transaction as there is no guarantee that all the btree
	 * blocks are more recent than this transaction, too.
	 */
	if (dip->di_version >= 3) {
		xfs_lsn_t	lsn = be64_to_cpu(dip->di_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_owner_change;
		}
	}

	/*
	 * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
	 * are transactional and if ordering is necessary we can determine that
	 * more accurately by the LSN field in the V3 inode core. Don't trust
	 * the inode versions as we might be changing them here - use the
	 * superblock flag to determine whether we need to look at di_flushiter
	 * to skip replay when the on disk inode is newer than the log one.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb) &&
	    dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
		/*
		 * Deal with the wrap case, DI_MAX_FLUSH is less
		 * than smaller numbers
		 */
		if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
		    dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
			/* do nothing */
		} else {
			trace_xfs_log_recover_inode_skip(log, in_f);
			error = 0;
			goto out_release;
		}
	}

	/* Take the opportunity to reset the flush iteration count */
	dicp->di_flushiter = 0;

	if (unlikely(S_ISREG(dicp->di_mode))) {
		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
					     XFS_ERRLEVEL_LOW, mp, dicp);
			xfs_alert(mp,
		"%s: Bad regular inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	} else if (unlikely(S_ISDIR(dicp->di_mode))) {
		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
					     XFS_ERRLEVEL_LOW, mp, dicp);
			xfs_alert(mp,
		"%s: Bad dir inode log record, rec ptr 0x%p, "
		"ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
				__func__, item, dip, bp, in_f->ilf_ino);
			error = -EFSCORRUPTED;
			goto out_release;
		}
	}
	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
			__func__, item, dip, bp, in_f->ilf_ino,
			dicp->di_nextents + dicp->di_anextents,
			dicp->di_nblocks);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_alert(mp,
	"%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
	"dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
		error = -EFSCORRUPTED;
		goto out_release;
	}
	isize = xfs_icdinode_size(dicp->di_version);
	if (unlikely(item->ri_buf[1].i_len > isize)) {
		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
				     XFS_ERRLEVEL_LOW, mp, dicp);
		xfs_alert(mp,
			"%s: Bad inode log record length %d, rec ptr 0x%p",
			__func__, item->ri_buf[1].i_len, item);
		error = -EFSCORRUPTED;
		goto out_release;
	}

	/* The core is in in-core format */
	xfs_dinode_to_disk(dip, dicp);

	/* the rest is in on-disk format */
	if (item->ri_buf[1].i_len > isize) {
		memcpy((char *)dip + isize,
			item->ri_buf[1].i_addr + isize,
			item->ri_buf[1].i_len - isize);
	}

	fields = in_f->ilf_fields;
	switch (fields & (XFS_ILOG_DEV | XFS_ILOG_UUID)) {
	case XFS_ILOG_DEV:
		xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
		break;
	case XFS_ILOG_UUID:
		memcpy(XFS_DFORK_DPTR(dip),
		       &in_f->ilf_u.ilfu_uuid,
		       sizeof(uuid_t));
		break;
	}

	if (in_f->ilf_size == 2)
		goto out_owner_change;
	len = item->ri_buf[2].i_len;
	src = item->ri_buf[2].i_addr;
	ASSERT(in_f->ilf_size <= 4);
	ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
	ASSERT(!(fields & XFS_ILOG_DFORK) ||
	       (len == in_f->ilf_dsize));

	switch (fields & XFS_ILOG_DFORK) {
	case XFS_ILOG_DDATA:
	case XFS_ILOG_DEXT:
		memcpy(XFS_DFORK_DPTR(dip), src, len);
		break;

	case XFS_ILOG_DBROOT:
		xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
				 (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
				 XFS_DFORK_DSIZE(dip, mp));
		break;

	default:
		/*
		 * There are no data fork flags set.
		 */
		ASSERT((fields & XFS_ILOG_DFORK) == 0);
		break;
	}

	/*
	 * If we logged any attribute data, recover it.  There may or
	 * may not have been any other non-core data logged in this
	 * transaction.
	 */
	if (in_f->ilf_fields & XFS_ILOG_AFORK) {
		if (in_f->ilf_fields & XFS_ILOG_DFORK) {
			attr_index = 3;
		} else {
			attr_index = 2;
		}
		len = item->ri_buf[attr_index].i_len;
		src = item->ri_buf[attr_index].i_addr;
		ASSERT(len == in_f->ilf_asize);

		switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
		case XFS_ILOG_ADATA:
		case XFS_ILOG_AEXT:
			dest = XFS_DFORK_APTR(dip);
			ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
			memcpy(dest, src, len);
			break;

		case XFS_ILOG_ABROOT:
			dest = XFS_DFORK_APTR(dip);
			xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
					 len, (xfs_bmdr_block_t *)dest,
					 XFS_DFORK_ASIZE(dip, mp));
			break;

		default:
			xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
			ASSERT(0);
			error = -EIO;
			goto out_release;
		}
	}

out_owner_change:
	if (in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER))
		error = xfs_recover_inode_owner_change(mp, dip, in_f,
						       buffer_list);
	/* re-generate the checksum. */
	xfs_dinode_calc_crc(log->l_mp, dip);

	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
error:
	if (need_free)
		kmem_free(in_f);
	return error;
}
/*
 * Recover QUOTAOFF records. We simply make a note of it in the xlog
 * structure, so that we know not to do any dquot item or dquot buffer recovery
 * of that type.
 */
STATIC int
xlog_recover_quotaoff_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
	ASSERT(qoff_f);

	/*
	 * The logitem format's flag tells us if this was user quotaoff,
	 * group/project quotaoff or both.
	 */
	if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_USER;
	if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_PROJ;
	if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
		log->l_quotaoffs_flag |= XFS_DQ_GROUP;

	return 0;
}
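
/*
 * Illustrative sketch (not part of the original source): the flag
 * translation performed above, restated as a hypothetical pure helper. The
 * on-disk quotaoff item carries accounting flags (XFS_*QUOTA_ACCT), while
 * recovery tracks them as dquot type bits (XFS_DQ_*) in l_quotaoffs_flag so
 * later dquot items and buffers of that type can be ignored.
 */
static inline uint
example_quotaoff_to_dq_type(uint qf_flags)
{
	uint	type = 0;

	if (qf_flags & XFS_UQUOTA_ACCT)
		type |= XFS_DQ_USER;
	if (qf_flags & XFS_PQUOTA_ACCT)
		type |= XFS_DQ_PROJ;
	if (qf_flags & XFS_GQUOTA_ACCT)
		type |= XFS_DQ_GROUP;
	return type;
}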
/*
 * Recover a dquot record
 */
STATIC int
xlog_recover_dquot_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	xfs_mount_t		*mp = log->l_mp;
	xfs_buf_t		*bp;
	struct xfs_disk_dquot	*ddq, *recddq;
	int			error;
	xfs_dq_logformat_t	*dq_f;
	uint			type;

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (mp->m_qflags == 0)
		return 0;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL) {
		xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
		return -EIO;
	}
	if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) {
		xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
			item->ri_buf[1].i_len, __func__);
		return -EIO;
	}

	/*
	 * This type of quotas was turned off, so ignore this record.
	 */
	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return 0;

	/*
	 * At this point we know that quota was _not_ turned off.
	 * Since the mount flags are not indicating to us otherwise, this
	 * must mean that quota is on, and the dquot needs to be replayed.
	 * Remember that we may not have fully recovered the superblock yet,
	 * so we can't do the usual trick of looking at the SB quota bits.
	 *
	 * The other possibility, of course, is that the quota subsystem was
	 * removed since the last mount - ENOSYS.
	 */
	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			   "xlog_recover_dquot_pass2 (log copy)");
	if (error)
		return -EIO;
	ASSERT(dq_f->qlf_len == 1);

	/*
	 * At this point we are assuming that the dquots have been allocated
	 * and hence the buffer has valid dquots stamped in it. It should,
	 * therefore, pass verifier validation. If the dquot is bad, then
	 * we'll return an error here, so we don't need to specifically check
	 * the dquot in the buffer after the verifier has run.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
				   XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		return error;

	ASSERT(bp);
	ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset);

	/*
	 * If the dquot has an LSN in it, recover the dquot only if it's less
	 * than the lsn of the transaction we are replaying.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
		xfs_lsn_t	lsn = be64_to_cpu(dqb->dd_lsn);

		if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
			goto out_release;
		}
	}

	memcpy(ddq, recddq, item->ri_buf[1].i_len);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	ASSERT(dq_f->qlf_size == 2);
	ASSERT(bp->b_target->bt_mount == mp);
	bp->b_iodone = xlog_recover_iodone;
	xfs_buf_delwri_queue(bp, buffer_list);

out_release:
	xfs_buf_relse(bp);
	return 0;
}
/*
 * This routine is called to create an in-core extent free intent
 * item from the efi format structure which was logged on disk.
 * It allocates an in-core efi, copies the extents from the format
 * structure into it, and adds the efi to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_efi_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int			error;
	xfs_mount_t		*mp = log->l_mp;
	xfs_efi_log_item_t	*efip;
	xfs_efi_log_format_t	*efi_formatp;

	efi_formatp = item->ri_buf[0].i_addr;

	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
					 &(efip->efi_format)))) {
		xfs_efi_item_free(efip);
		return error;
	}
	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);

	spin_lock(&log->l_ailp->xa_lock);
	/*
	 * xfs_trans_ail_update() drops the AIL lock.
	 */
	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
	return 0;
}
/*
 * This routine is called when an efd format structure is found in
 * a committed transaction in the log.  Its purpose is to cancel
 * the corresponding efi if it was still in the log.  To do this
 * it searches the AIL for the efi with an id equal to that in the
 * efd format structure.  If we find it, we remove the efi from the
 * AIL and free it.
 */
STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	xfs_efd_log_format_t	*efd_formatp;
	xfs_efi_log_item_t	*efip = NULL;
	xfs_log_item_t		*lip;
	__uint64_t		efi_id;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;

	efd_formatp = item->ri_buf[0].i_addr;
	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
	       (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
	efi_id = efd_formatp->efd_efi_id;

	/*
	 * Search for the efi with the id in the efd format structure
	 * in the AIL.
	 */
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		if (lip->li_type == XFS_LI_EFI) {
			efip = (xfs_efi_log_item_t *)lip;
			if (efip->efi_format.efi_id == efi_id) {
				/*
				 * xfs_trans_ail_delete() drops the
				 * AIL lock.
				 */
				xfs_trans_ail_delete(ailp, lip,
						     SHUTDOWN_CORRUPT_INCORE);
				xfs_efi_item_free(efip);
				spin_lock(&ailp->xa_lock);
				break;
			}
		}
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);

	return 0;
}
/*
 * This routine is called when an inode create format structure is found in a
 * committed transaction in the log.  Its purpose is to initialise the inodes
 * being allocated on disk. This requires us to get inode cluster buffers that
 * match the range to be initialised, stamped with inode templates and written
 * by delayed write so that subsequent modifications will hit the cached buffer
 * and only need writing out at the end of recovery.
 */
STATIC int
xlog_recover_do_icreate_pass2(
	struct xlog		*log,
	struct list_head	*buffer_list,
	xlog_recover_item_t	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_icreate_log	*icl;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	unsigned int		count;
	unsigned int		isize;
	xfs_agblock_t		length;

	icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
	if (icl->icl_type != XFS_LI_ICREATE) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
		return -EINVAL;
	}

	if (icl->icl_size != 1) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
		return -EINVAL;
	}

	agno = be32_to_cpu(icl->icl_ag);
	if (agno >= mp->m_sb.sb_agcount) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
		return -EINVAL;
	}
	agbno = be32_to_cpu(icl->icl_agbno);
	if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
		return -EINVAL;
	}
	isize = be32_to_cpu(icl->icl_isize);
	if (isize != mp->m_sb.sb_inodesize) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
		return -EINVAL;
	}
	count = be32_to_cpu(icl->icl_count);
	if (!count) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
		return -EINVAL;
	}
	length = be32_to_cpu(icl->icl_length);
	if (!length || length >= mp->m_sb.sb_agblocks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
		return -EINVAL;
	}

	/* existing allocation is fixed value */
	ASSERT(count == mp->m_ialloc_inos);
	ASSERT(length == mp->m_ialloc_blks);
	if (count != mp->m_ialloc_inos ||
	    length != mp->m_ialloc_blks) {
		xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2");
		return -EINVAL;
	}

	/*
	 * Inode buffers can be freed. Do not replay the inode initialisation
	 * as we could be overwriting something written after this inode
	 * buffer was cancelled.
	 *
	 * XXX: we need to iterate all buffers and only init those that are not
	 * cancelled. I think that a more fine grained factoring of
	 * xfs_ialloc_inode_init may be appropriate here to enable this to be
	 * done.
	 */
	if (xlog_check_buffer_cancelled(log,
			XFS_AGB_TO_DADDR(mp, agno, agbno), length, 0))
		return 0;

	xfs_ialloc_inode_init(mp, NULL, buffer_list, agno, agbno, length,
			      be32_to_cpu(icl->icl_gen));
	return 0;
}
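
/*
 * Illustrative sketch (not part of the original source): the icreate item
 * carries AG-relative coordinates, so the cancel-table check above first
 * converts (agno, agbno) to the absolute disk address of the inode cluster.
 * Hypothetical helper, shown only to make that conversion explicit.
 */
static inline xfs_daddr_t
example_icreate_cluster_daddr(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno)
{
	/* AG-relative filesystem block -> absolute 512-byte basic block */
	return XFS_AGB_TO_DADDR(mp, agno, agbno);
}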
STATIC void
xlog_recover_buffer_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;

	if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len, buf_f->blf_flags)) {
		return;
	}

	xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
			  buf_f->blf_len, NULL);
}
STATIC void
xlog_recover_inode_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_inode_log_format	ilf_buf;
	struct xfs_inode_log_format	*ilfp;
	struct xfs_mount		*mp = log->l_mp;
	int				error;

	if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
		ilfp = item->ri_buf[0].i_addr;
	} else {
		ilfp = &ilf_buf;
		memset(ilfp, 0, sizeof(*ilfp));
		error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
		if (error)
			return;
	}

	if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
		return;

	xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
			  ilfp->ilf_len, &xfs_inode_buf_ra_ops);
}
STATIC void
xlog_recover_dquot_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_mount	*mp = log->l_mp;
	struct xfs_disk_dquot	*recddq;
	struct xfs_dq_logformat	*dq_f;
	uint			type;

	if (mp->m_qflags == 0)
		return;

	recddq = item->ri_buf[1].i_addr;
	if (recddq == NULL)
		return;
	if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
		return;

	type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
	ASSERT(type);
	if (log->l_quotaoffs_flag & type)
		return;

	dq_f = item->ri_buf[0].i_addr;
	ASSERT(dq_f);
	ASSERT(dq_f->qlf_len == 1);

	xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno,
			  XFS_FSB_TO_BB(mp, dq_f->qlf_len), NULL);
}
STATIC void
xlog_recover_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		xlog_recover_buffer_ra_pass2(log, item);
		break;
	case XFS_LI_INODE:
		xlog_recover_inode_ra_pass2(log, item);
		break;
	case XFS_LI_DQUOT:
		xlog_recover_dquot_ra_pass2(log, item);
		break;
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_QUOTAOFF:
	default:
		break;
	}
}
STATIC int
xlog_recover_commit_pass1(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass1(log, item);
	case XFS_LI_QUOTAOFF:
		return xlog_recover_quotaoff_pass1(log, item);
	case XFS_LI_INODE:
	case XFS_LI_EFI:
	case XFS_LI_EFD:
	case XFS_LI_DQUOT:
	case XFS_LI_ICREATE:
		/* nothing to do in pass 1 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EIO;
	}
}
STATIC int
xlog_recover_commit_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item)
{
	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);

	switch (ITEM_TYPE(item)) {
	case XFS_LI_BUF:
		return xlog_recover_buffer_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_INODE:
		return xlog_recover_inode_pass2(log, buffer_list, item,
						 trans->r_lsn);
	case XFS_LI_EFI:
		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
	case XFS_LI_EFD:
		return xlog_recover_efd_pass2(log, item);
	case XFS_LI_DQUOT:
		return xlog_recover_dquot_pass2(log, buffer_list, item,
						trans->r_lsn);
	case XFS_LI_ICREATE:
		return xlog_recover_do_icreate_pass2(log, buffer_list, item);
	case XFS_LI_QUOTAOFF:
		/* nothing to do in pass2 */
		return 0;
	default:
		xfs_warn(log->l_mp, "%s: invalid item type (%d)",
			__func__, ITEM_TYPE(item));
		ASSERT(0);
		return -EIO;
	}
}
STATIC int
xlog_recover_items_pass2(
	struct xlog			*log,
	struct xlog_recover		*trans,
	struct list_head		*buffer_list,
	struct list_head		*item_list)
{
	struct xlog_recover_item	*item;
	int				error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		error = xlog_recover_commit_pass2(log, trans,
						  buffer_list, item);
		if (error)
			return error;
	}

	return error;
}
/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now.  Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	int				error = 0;
	int				error2;
	int				items_queued = 0;
	struct xlog_recover_item	*item;
	struct xlog_recover_item	*next;
	LIST_HEAD			(buffer_list);
	LIST_HEAD			(ra_list);
	LIST_HEAD			(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		switch (pass) {
		case XLOG_RECOVER_PASS1:
			error = xlog_recover_commit_pass1(log, trans, item);
			break;
		case XLOG_RECOVER_PASS2:
			xlog_recover_ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						&buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					&buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	error2 = xfs_buf_delwri_submit(&buffer_list);
	return error ? error : error2;
}
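
/*
 * Illustrative sketch (not part of the original source): the batching
 * pattern used above. Pass-2 items are first queued onto ra_list so their
 * buffers can be read ahead asynchronously, and only once
 * XLOG_RECOVER_COMMIT_QUEUE_MAX items are queued are they actually replayed,
 * by which time the readahead I/O has likely completed. A minimal
 * restatement of the loop body:
 *
 *	xlog_recover_ra_pass2(log, item);	// start async read
 *	list_move_tail(&item->ri_list, &ra_list);
 *	if (++items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
 *		error = xlog_recover_items_pass2(log, trans,
 *						 &buffer_list, &ra_list);
 *		list_splice_tail_init(&ra_list, &done_list);
 *		items_queued = 0;
 *	}
 */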
STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	xlog_recover_item_t	*item;

	item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}
STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr, old_ptr;
	int			old_len;

	if (list_empty(&trans->r_itemq)) {
		/* finish copying rest of trans header */
		xlog_recover_add_item(&trans->r_itemq);
		ptr = (xfs_caddr_t) &trans->r_theader +
				sizeof(xfs_trans_header_t) - len;
		memcpy(ptr, dp, len);
		return 0;
	}
	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}
/*
 * The next region to add is the start of a new region.  It could be
 * a whole region or it could be the first part of a new region.  Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned.  Therefore, we
 * either have both fields or we have neither field.  In the case we have
 * neither field, the data part of the region is zero length.  We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later.  If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	int			len)
{
	xfs_inode_log_format_t	*in_f;			/* any will do */
	xlog_recover_item_t	*item;
	xfs_caddr_t		ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EIO;
		}
		if (len == sizeof(xfs_trans_header_t))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = kmem_alloc(len, KM_SLEEP);
	memcpy(ptr, dp, len);
	in_f = (xfs_inode_log_format_t *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					xlog_recover_item_t, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EIO;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    KM_SLEEP);
	}
	ASSERT(item->ri_total > item->ri_cnt);
	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}
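
/*
 * Illustrative sketch (not part of the original source): the "first 32 bits"
 * assumption described above. Because every region is 32 bit aligned, the
 * type and size fields of any log format structure can be peeked through an
 * inode log format pointer, regardless of the item's real type. The helper
 * name is hypothetical.
 */
static inline int
example_peek_region_count(void *region)
{
	/* any log format will do: only the leading size field is examined */
	xfs_inode_log_format_t	*in_f = region;

	return in_f->ilf_size;
}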
/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	xlog_recover_item_t	*item, *n;
	int			i;

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}
/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	xfs_caddr_t		dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EIO;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
/*
 * Lookup the transaction recovery structure associated with the ID in the
 * current ophdr. If the transaction doesn't exist and the start flag is set in
 * the ophdr, then allocate a new transaction for future ID matches to find.
 * Either way, return what we found during the lookup - an existing transaction
 * or nothing.
 */
STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead)
{
	struct xlog_recover	*trans;
	xlog_tid_t		tid;
	struct hlist_head	*rhp;

	tid = be32_to_cpu(ohead->oh_tid);
	rhp = &rhash[XLOG_RHASH(tid)];
	hlist_for_each_entry(trans, rhp, r_list) {
		if (trans->r_log_tid == tid)
			return trans;
	}

	/*
	 * skip over non-start transaction headers - we could be
	 * processing slack space before the next transaction starts
	 */
	if (!(ohead->oh_flags & XLOG_START_TRANS))
		return NULL;

	ASSERT(be32_to_cpu(ohead->oh_len) == 0);

	/*
	 * This is a new transaction so allocate a new recovery container to
	 * hold the recovery ops that will follow.
	 */
	trans = kmem_zalloc(sizeof(struct xlog_recover), KM_SLEEP);
	trans->r_log_tid = tid;
	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
	INIT_LIST_HEAD(&trans->r_itemq);
	INIT_HLIST_NODE(&trans->r_list);
	hlist_add_head(&trans->r_list, rhp);

	/*
	 * Nothing more to do for this ophdr. Items to be added to this new
	 * transaction will be in subsequent ophdr containers.
	 */
	return NULL;
}
STATIC int
xlog_recover_process_ophdr(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	struct xlog_op_header	*ohead,
	xfs_caddr_t		dp,
	xfs_caddr_t		end,
	int			pass)
{
	struct xlog_recover	*trans;
	unsigned int		len;

	/* Do we understand who wrote this op? */
	if (ohead->oh_clientid != XFS_TRANSACTION &&
	    ohead->oh_clientid != XFS_LOG) {
		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
			__func__, ohead->oh_clientid);
		ASSERT(0);
		return -EIO;
	}

	/*
	 * Check the ophdr contains all the data it is supposed to contain.
	 */
	len = be32_to_cpu(ohead->oh_len);
	if (dp + len > end) {
		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
		WARN_ON(1);
		return -EIO;
	}

	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
	if (!trans) {
		/* nothing to do, so skip over this ophdr */
		return 0;
	}

	return xlog_recovery_process_trans(log, trans, dp, len,
					   ohead->oh_flags, pass);
}
/*
 * There are two valid states of the r_state field.  0 indicates that the
 * transaction structure is in a normal state.  We have either seen the
 * start of the transaction or the last operation we added was not a partial
 * operation.  If the last operation we added to the transaction was a
 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
 *
 * NOTE: skip LRs with 0 data length.
 */
STATIC int
xlog_recover_process_data(
	struct xlog		*log,
	struct hlist_head	rhash[],
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	int			pass)
{
	struct xlog_op_header	*ohead;
	xfs_caddr_t		end;
	int			num_logops;
	int			error;

	end = dp + be32_to_cpu(rhead->h_len);
	num_logops = be32_to_cpu(rhead->h_num_logops);

	/* check the log format matches our own - else we can't recover */
	if (xlog_header_check_recover(log->l_mp, rhead))
		return -EIO;

	while ((dp < end) && num_logops) {

		ohead = (struct xlog_op_header *)dp;
		dp += sizeof(*ohead);
		ASSERT(dp <= end);

		/* errors will abort recovery */
		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
						   dp, end, pass);
		if (error)
			return error;

		dp += be32_to_cpu(ohead->oh_len);
		num_logops--;
	}
	return 0;
}
/*
 * Process an extent free intent item that was recovered from
 * the log.  We need to free the extents that it describes.
 */
STATIC int
xlog_recover_process_efi(
	xfs_mount_t		*mp,
	xfs_efi_log_item_t	*efip)
{
	xfs_efd_log_item_t	*efdp;
	xfs_trans_t		*tp;
	int			i;
	int			error = 0;
	xfs_extent_t		*extp;
	xfs_fsblock_t		startblock_fsb;

	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));

	/*
	 * First check the validity of the extents described by the
	 * EFI.  If any are bad, then assume that all are bad and
	 * just toss the EFI.
	 */
	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, extp->ext_start));
		if ((startblock_fsb == 0) ||
		    (extp->ext_len == 0) ||
		    (startblock_fsb >= mp->m_sb.sb_dblocks) ||
		    (extp->ext_len >= mp->m_sb.sb_agblocks)) {
			/*
			 * This will pull the EFI from the AIL and
			 * free the memory associated with it.
			 */
			set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
			xfs_efi_release(efip, efip->efi_format.efi_nextents);
			return -EIO;
		}
	}

	tp = xfs_trans_alloc(mp, 0);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
	if (error)
		goto abort_error;
	efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);

	for (i = 0; i < efip->efi_format.efi_nextents; i++) {
		extp = &(efip->efi_format.efi_extents[i]);
		error = xfs_free_extent(tp, extp->ext_start, extp->ext_len);
		if (error)
			goto abort_error;
		xfs_trans_log_efd_extent(tp, efdp, extp->ext_start,
					 extp->ext_len);
	}

	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
	error = xfs_trans_commit(tp, 0);
	return error;

abort_error:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
	return error;
}
/*
 * When this is called, all of the EFIs which did not have
 * corresponding EFDs should be in the AIL.  What we do now
 * is free the extents associated with each one.
 *
 * Since we process the EFIs in normal transactions, they
 * will be removed at some point after the commit.  This prevents
 * us from just walking down the list processing each one.
 * We'll use a flag in the EFI to skip those that we've already
 * processed and use the AIL iteration mechanism's generation
 * count to try to speed this up at least a bit.
 *
 * When we start, we know that the EFIs are the only things in
 * the AIL.  As we process them, however, other items are added
 * to the AIL.  Since everything added to the AIL must come after
 * everything already in the AIL, we stop processing as soon as
 * we see something other than an EFI in the AIL.
 */
STATIC int
xlog_recover_process_efis(
	struct xlog	*log)
{
	xfs_log_item_t		*lip;
	xfs_efi_log_item_t	*efip;
	int			error = 0;
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp;

	ailp = log->l_ailp;
	spin_lock(&ailp->xa_lock);
	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	while (lip != NULL) {
		/*
		 * We're done when we see something other than an EFI.
		 * There should be no EFIs left in the AIL now.
		 */
		if (lip->li_type != XFS_LI_EFI) {
#ifdef DEBUG
			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
				ASSERT(lip->li_type != XFS_LI_EFI);
#endif
			break;
		}

		/*
		 * Skip EFIs that we've already processed.
		 */
		efip = (xfs_efi_log_item_t *)lip;
		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
			lip = xfs_trans_ail_cursor_next(ailp, &cur);
			continue;
		}

		spin_unlock(&ailp->xa_lock);
		error = xlog_recover_process_efi(log->l_mp, efip);
		spin_lock(&ailp->xa_lock);
		if (error)
			goto out;
		lip = xfs_trans_ail_cursor_next(ailp, &cur);
	}
out:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
	return error;
}
/*
 * This routine performs a transaction to null out a bad inode pointer
 * in an agi unlinked inode hash bucket.
 */
STATIC void
xlog_recover_clear_agi_bucket(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agno,
	int		bucket)
{
	xfs_trans_t	*tp;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	int		offset;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
	if (error)
		goto out_abort;

	error = xfs_read_agi(mp, tp, agno, &agibp);
	if (error)
		goto out_abort;

	agi = XFS_BUF_TO_AGI(agibp);
	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		 (sizeof(xfs_agino_t) * bucket);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));

	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_error;
	return;

out_abort:
	xfs_trans_cancel(tp, XFS_TRANS_ABORT);
out_error:
	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
	return;
}
STATIC xfs_agino_t
xlog_recover_process_one_iunlink(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	xfs_agino_t			agino,
	int				bucket)
{
	struct xfs_buf			*ibp;
	struct xfs_dinode		*dip;
	struct xfs_inode		*ip;
	xfs_ino_t			ino;
	int				error;

	ino = XFS_AGINO_TO_INO(mp, agno, agino);
	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		goto fail;

	/*
	 * Get the on disk inode to find the next inode in the bucket.
	 */
	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
	if (error)
		goto fail_iput;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	/* setup for the next pass */
	agino = be32_to_cpu(dip->di_next_unlinked);
	xfs_buf_relse(ibp);

	/*
	 * Prevent any DMAPI event from being sent when the reference on
	 * the inode is dropped.
	 */
	ip->i_d.di_dmevmask = 0;

	IRELE(ip);
	return agino;

 fail_iput:
	IRELE(ip);
 fail:
	/*
	 * We can't read in the inode this bucket points to, or this inode
	 * is messed up.  Just ditch this bucket of inodes.  We will lose
	 * some inodes and space, but at least we won't hang.
	 *
	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
	 * clear the inode pointer in the bucket.
	 */
	xlog_recover_clear_agi_bucket(mp, agno, bucket);
	return NULLAGINO;
}
/*
 * xlog_iunlink_recover
 *
 * This is called during recovery to process any inodes which we unlinked but
 * did not free when the system crashed.  These inodes will be on the lists
 * in the AGI blocks.  What we do here is scan all the AGIs and fully truncate
 * and free any inodes found on the lists.  Each inode is removed from the
 * lists when it has been fully truncated and is freed.  The freeing of the
 * inode and its removal from the list must be atomic.
 */
STATIC void
xlog_recover_process_iunlinks(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agnumber_t	agno;
	xfs_agi_t	*agi;
	xfs_buf_t	*agibp;
	xfs_agino_t	agino;
	int		bucket;
	int		error;
	uint		mp_dmevmask;

	mp = log->l_mp;

	/*
	 * Prevent any DMAPI event from being sent while in this function.
	 */
	mp_dmevmask = mp->m_dmevmask;
	mp->m_dmevmask = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		/*
		 * Find the agi for this ag.
		 */
		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			/*
			 * AGI is b0rked. Don't process it.
			 *
			 * We should probably mark the filesystem as corrupt
			 * after we've recovered all the ag's we can....
			 */
			continue;
		}
		/*
		 * Unlock the buffer so that it can be acquired in the normal
		 * course of the transaction to truncate and free each inode.
		 * Because we are not racing with anyone else here for the AGI
		 * buffer, we don't even need to hold it locked to read the
		 * initial unlinked bucket entries out of the buffer. We keep
		 * buffer reference though, so that it stays pinned in memory
		 * while we need the buffer.
		 */
		agi = XFS_BUF_TO_AGI(agibp);
		xfs_buf_unlock(agibp);

		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
			while (agino != NULLAGINO) {
				agino = xlog_recover_process_one_iunlink(mp,
							agno, agino, bucket);
			}
		}
		xfs_buf_rele(agibp);
	}

	mp->m_dmevmask = mp_dmevmask;
}
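
/*
 * Illustrative sketch (not part of the original source): the shape of the
 * bucket walk above. Each AGI bucket heads a singly linked list of unlinked
 * inodes chained through di_next_unlinked and terminated by NULLAGINO;
 * xlog_recover_process_one_iunlink() returns the next link (or NULLAGINO
 * after clearing the bucket on failure), so the walk reduces to:
 *
 *	agino = be32_to_cpu(agi->agi_unlinked[bucket]);
 *	while (agino != NULLAGINO)
 *		agino = xlog_recover_process_one_iunlink(mp, agno, agino,
 *							 bucket);
 */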
/*
 * Unpack the log buffer data and crc check it. If the check fails, issue a
 * warning if and only if the CRC in the header is non-zero. This makes the
 * check an advisory warning, and the zero CRC check will prevent failure
 * warnings from being emitted when upgrading the kernel from one that does not
 * add CRCs by default.
 *
 * When filesystems are CRC enabled, this CRC mismatch becomes a fatal log
 * corruption failure.
 */
STATIC int
xlog_unpack_data_crc(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	__le32			crc;

	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
	if (crc != rhead->h_crc) {
		if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
			xfs_alert(log->l_mp,
		"log record CRC mismatch: found 0x%x, expected 0x%x.",
					le32_to_cpu(rhead->h_crc),
					le32_to_cpu(crc));
			xfs_hex_dump(dp, 32);
		}

		/*
		 * If we've detected a log record corruption, then we can't
		 * recover past this point. Abort recovery if we are enforcing
		 * CRC protection by punting an error back up the stack.
		 */
		if (xfs_sb_version_hascrc(&log->l_mp->m_sb))
			return -EFSCORRUPTED;
	}

	return 0;
}

STATIC int
xlog_unpack_data(
	struct xlog_rec_header	*rhead,
	xfs_caddr_t		dp,
	struct xlog		*log)
{
	int			i, j, k;
	int			error;

	error = xlog_unpack_data_crc(rhead, dp, log);
	if (error)
		return error;

	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
		dp += BBSIZE;
	}

	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
			dp += BBSIZE;
		}
	}

	return 0;
}
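
/*
 * Illustrative sketch (not part of the original source): the inverse of
 * xlog_unpack_data(). At write time the first __be32 of every basic block in
 * the record is overwritten with the cycle number so torn writes can be
 * detected, and the displaced words are stashed in h_cycle_data (and in the
 * extended headers for v2 logs). Unpacking above simply puts them back. A
 * minimal sketch of the packing side, assuming a record small enough to fit
 * the first header; the helper name is hypothetical.
 */
static inline void
example_pack_cycle_data(
	struct xlog_rec_header	*rhead,
	char			*dp,
	int			len)
{
	int			i;

	for (i = 0; i < BTOBB(len) &&
	     i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
		rhead->h_cycle_data[i] = *(__be32 *)dp;	/* stash the word */
		*(__be32 *)dp = rhead->h_cycle;		/* stamp the cycle */
		dp += BBSIZE;
	}
}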
STATIC int
xlog_valid_rec_header(
	struct xlog		*log,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		blkno)
{
	int			hlen;

	if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	if (unlikely(
	    (!rhead->h_version ||
	    (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) {
		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
			__func__, be32_to_cpu(rhead->h_version));
		return -EIO;
	}

	/* LR body must have data or it wouldn't have been written */
	hlen = be32_to_cpu(rhead->h_len);
	if (unlikely( hlen <= 0 || hlen > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(2)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) {
		XFS_ERROR_REPORT("xlog_valid_rec_header(3)",
				XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	return 0;
}
/*
 * Read the log from tail to head and process the log records found.
 * Handle the two cases where the tail and head are in the same cycle
 * and where the active portion of the log wraps around the end of
 * the physical log separately.  The pass parameter is passed through
 * to the routines called to process the data and is not looked at
 * here.
 */
STATIC int
xlog_do_recovery_pass(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			pass)
{
	xlog_rec_header_t	*rhead;
	xfs_daddr_t		blk_no;
	xfs_caddr_t		offset;
	xfs_buf_t		*hbp, *dbp;
	int			error = 0, h_size;
	int			bblks, split_bblks;
	int			hblks, split_hblks, wrapped_hblks;
	struct hlist_head	rhash[XLOG_RHASH_SIZE];

	ASSERT(head_blk != tail_blk);

	/*
	 * Read the header of the tail block and get the iclog buffer size from
	 * h_size.  Use this to tell how many sectors make up the log header.
	 */
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		/*
		 * When using variable length iclogs, read first sector of
		 * iclog header and extract the header size from it.  Get a
		 * new hbp that is the correct size.
		 */
		hbp = xlog_get_bp(log, 1);
		if (!hbp)
			return -ENOMEM;

		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
		if (error)
			goto bread_err1;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, tail_blk);
		if (error)
			goto bread_err1;
		h_size = be32_to_cpu(rhead->h_size);
		if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) &&
		    (h_size > XLOG_HEADER_CYCLE_SIZE)) {
			hblks = h_size / XLOG_HEADER_CYCLE_SIZE;
			if (h_size % XLOG_HEADER_CYCLE_SIZE)
				hblks++;
			xlog_put_bp(hbp);
			hbp = xlog_get_bp(log, hblks);
		} else {
			hblks = 1;
		}
	} else {
		ASSERT(log->l_sectBBsize == 1);
		hblks = 1;
		hbp = xlog_get_bp(log, 1);
		h_size = XLOG_BIG_RECORD_BSIZE;
	}

	if (!hbp)
		return -ENOMEM;
	dbp = xlog_get_bp(log, BTOBB(h_size));
	if (!dbp) {
		xlog_put_bp(hbp);
		return -ENOMEM;
	}

	memset(rhash, 0, sizeof(rhash));
	blk_no = tail_blk;
	if (tail_blk > head_blk) {
		/*
		 * Perform recovery around the end of the physical log.
		 * When the head is not on the same cycle number as the tail,
		 * we can't do a sequential recovery.
		 */
		while (blk_no < log->l_logBBsize) {
			/*
			 * Check for header wrapping around physical end-of-log
			 */
			offset = hbp->b_addr;
			split_hblks = 0;
			wrapped_hblks = 0;
			if (blk_no + hblks <= log->l_logBBsize) {
				/* Read header in one read */
				error = xlog_bread(log, blk_no, hblks, hbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This LR is split across physical log end */
				if (blk_no != log->l_logBBsize) {
					/* some data before physical log end */
					ASSERT(blk_no <= INT_MAX);
					split_hblks = log->l_logBBsize - (int)blk_no;
					ASSERT(split_hblks > 0);
					error = xlog_bread(log, blk_no,
							   split_hblks, hbp,
							   &offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				wrapped_hblks = hblks - split_hblks;
				error = xlog_bread_offset(log, 0,
						wrapped_hblks, hbp,
						offset + BBTOB(split_hblks));
				if (error)
					goto bread_err2;
			}
			rhead = (xlog_rec_header_t *)offset;
			error = xlog_valid_rec_header(log, rhead,
						split_hblks ? blk_no : 0);
			if (error)
				goto bread_err2;

			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
			blk_no += hblks;

			/* Read in data for log record */
			if (blk_no + bblks <= log->l_logBBsize) {
				error = xlog_bread(log, blk_no, bblks, dbp,
						   &offset);
				if (error)
					goto bread_err2;
			} else {
				/* This log record is split across the
				 * physical end of log */
				offset = dbp->b_addr;
				split_bblks = 0;
				if (blk_no != log->l_logBBsize) {
					/* some data is before the physical
					 * end of log */
					ASSERT(!wrapped_hblks);
					ASSERT(blk_no <= INT_MAX);
					split_bblks =
						log->l_logBBsize - (int)blk_no;
					ASSERT(split_bblks > 0);
					error = xlog_bread(log, blk_no,
							split_bblks, dbp,
							&offset);
					if (error)
						goto bread_err2;
				}

				/*
				 * Note: this black magic still works with
				 * large sector sizes (non-512) only because:
				 * - we increased the buffer size originally
				 *   by 1 sector giving us enough extra space
				 *   for the second read;
				 * - the log start is guaranteed to be sector
				 *   aligned;
				 * - we read the log end (LR header start)
				 *   _first_, then the log start (LR header end)
				 *   - order is important.
				 */
				error = xlog_bread_offset(log, 0,
						bblks - split_bblks, dbp,
						offset + BBTOB(split_bblks));
				if (error)
					goto bread_err2;
			}

			error = xlog_unpack_data(rhead, offset, log);
			if (error)
				goto bread_err2;

			error = xlog_recover_process_data(log, rhash,
							rhead, offset, pass);
			if (error)
				goto bread_err2;
			blk_no += bblks;
		}

		ASSERT(blk_no >= log->l_logBBsize);
		blk_no -= log->l_logBBsize;
	}

	/* read first part of physical log */
	while (blk_no < head_blk) {
		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
		if (error)
			goto bread_err2;

		rhead = (xlog_rec_header_t *)offset;
		error = xlog_valid_rec_header(log, rhead, blk_no);
		if (error)
			goto bread_err2;

		/* blocks in data section */
		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
				   &offset);
		if (error)
			goto bread_err2;

		error = xlog_unpack_data(rhead, offset, log);
		if (error)
			goto bread_err2;

		error = xlog_recover_process_data(log, rhash,
						rhead, offset, pass);
		if (error)
			goto bread_err2;
		blk_no += bblks + hblks;
	}

 bread_err2:
	xlog_put_bp(dbp);
 bread_err1:
	xlog_put_bp(hbp);
	return error;
}
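
/*
 * Illustrative sketch (not part of the original source): the split-read
 * arithmetic used twice above. When a log record straddles the physical end
 * of the log, the first part is read from blk_no to the end of the device
 * and the remainder from block 0, into the tail of the same buffer.
 * Hypothetical helper restating the pair of reads:
 */
static inline int
example_read_wrapped(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	struct xfs_buf	*bp,
	xfs_caddr_t	*offset)
{
	int	split = log->l_logBBsize - (int)blk_no; /* blocks before end */
	int	error;

	error = xlog_bread(log, blk_no, split, bp, offset);
	if (error)
		return error;
	/* remainder wraps to the front of the physical log */
	return xlog_bread_offset(log, 0, nbblks - split, bp,
				 *offset + BBTOB(split));
}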
/*
 * Do the recovery of the log.  We actually do this in two phases.
 * The two passes are necessary in order to implement the function
 * of cancelling a record written into the log.  The first pass
 * determines those things which have been cancelled, and the
 * second pass replays log items normally except for those which
 * have been cancelled.  The handling of the replay and cancellations
 * takes place in the log item type specific routines.
 *
 * The table of items which have cancel records in the log is allocated
 * and freed at this level, since only here do we know when all of
 * the log recovery has been completed.
 */
STATIC int
xlog_do_log_recovery(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error, i;

	ASSERT(head_blk != tail_blk);

	/*
	 * First do a pass to find all of the cancelled buf log items.
	 * Store them in the buf_cancel_table for use in the second pass.
	 */
	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
					      sizeof(struct list_head),
					      KM_SLEEP);
	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);

	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS1);
	if (error != 0) {
		kmem_free(log->l_buf_cancel_table);
		log->l_buf_cancel_table = NULL;
		return error;
	}
	/*
	 * Then do a second pass to actually recover the items in the log.
	 * When it is complete free the table of buf cancel items.
	 */
	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
				      XLOG_RECOVER_PASS2);
#ifdef DEBUG
	if (!error) {
		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
	}
#endif	/* DEBUG */

	kmem_free(log->l_buf_cancel_table);
	log->l_buf_cancel_table = NULL;

	return error;
}
/*
 * Do the actual recovery.
 */
STATIC int
xlog_do_recover(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	int		error;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;

	/*
	 * First replay the images in the log.
	 */
	error = xlog_do_log_recovery(log, head_blk, tail_blk);
	if (error)
		return error;

	/*
	 * If IO errors happened during recovery, bail out.
	 */
	if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
		return -EIO;
	}

	/*
	 * We now update the tail_lsn since much of the recovery has completed
	 * and there may be space available to use.  If there were no extent
	 * frees or iunlinks, we can free up the entire log and set the
	 * tail_lsn to be the last_sync_lsn.  This was set in xlog_find_tail
	 * to be the lsn of the last known good LR on disk.  If there are
	 * extent frees or iunlinks they will have some entries in the AIL;
	 * so we look at the AIL to determine how to set the tail_lsn.
	 */
	xlog_assign_tail_lsn(log->l_mp);

	/*
	 * Now that we've finished replaying all buffer and inode
	 * updates, re-read in the superblock and reverify it.
	 */
	bp = xfs_getsb(log->l_mp, 0);
	XFS_BUF_UNDONE(bp);
	ASSERT(!(XFS_BUF_ISWRITE(bp)));
	XFS_BUF_READ(bp);
	XFS_BUF_UNASYNC(bp);
	bp->b_ops = &xfs_sb_buf_ops;

	error = xfs_buf_submit_wait(bp);
	if (error) {
		if (!XFS_FORCED_SHUTDOWN(log->l_mp)) {
			xfs_buf_ioerror_alert(bp, __func__);
			ASSERT(0);
		}
		xfs_buf_relse(bp);
		return error;
	}

	/* Convert superblock from on-disk format */
	sbp = &log->l_mp->m_sb;
	xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
	ASSERT(sbp->sb_magicnum == XFS_SB_MAGIC);
	ASSERT(xfs_sb_good_version(sbp));
	xfs_buf_relse(bp);

	/* We've re-read the superblock so re-initialize per-cpu counters */
	xfs_icsb_reinit_counters(log->l_mp);

	xlog_recover_check_summary(log);

	/* Normal transactions can now occur */
	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
	return 0;
}
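/*
 * Illustrative sketch (not part of XFS, not compiled): xfs_sb_from_disk()
 * above converts the big-endian on-disk superblock into the host-endian
 * incore form, and the ASSERTs then reverify magic and version. The snippet
 * below shows the same decode-then-verify pattern for a made-up on-disk
 * header, using a portable byte-shift decoder in place of the kernel's
 * be32_to_cpu(); struct toy_sb and toy_be32_to_cpu() are invented names.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_MAGIC	0x58465342U	/* "XFSB", for flavour only */

/* decode a 32-bit big-endian value regardless of host endianness */
static uint32_t toy_be32_to_cpu(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

struct toy_sb {			/* hypothetical incore form */
	uint32_t	magic;
	uint32_t	blocksize;
};

int main(void)
{
	/* on-disk image: magic then blocksize, both big-endian */
	uint8_t disk[8] = { 0x58, 0x46, 0x53, 0x42, 0x00, 0x00, 0x10, 0x00 };
	struct toy_sb sb;

	sb.magic = toy_be32_to_cpu(&disk[0]);
	sb.blocksize = toy_be32_to_cpu(&disk[4]);
	assert(sb.magic == TOY_MAGIC);		/* "reverify" the magic */
	printf("blocksize %u\n", sb.blocksize);	/* prints 4096 */
	return 0;
}
#endif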
/*
 * Perform recovery and re-initialize some log variables in xlog_find_tail.
 *
 * Return error or zero.
 */
int
xlog_recover(
	struct xlog	*log)
{
	xfs_daddr_t	head_blk, tail_blk;
	int		error;

	/* find the tail of the log */
	if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
		return error;

	if (tail_blk != head_blk) {
		/* There used to be a comment here:
		 *
		 * disallow recovery on read-only mounts.  note -- mount
		 * checks for ENOSPC and turns it into an intelligent
		 * error message.
		 * ...but this is no longer true.  Now, unless you specify
		 * NORECOVERY (in which case this function would never be
		 * called), we just go ahead and recover.  We do this all
		 * under the vfs layer, so we can get away with it unless
		 * the device itself is read-only, in which case we fail.
		 */
		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
			return error;
		}

		/*
		 * Version 5 superblock log feature mask validation. We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover. If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.\n"
"The log can not be fully and/or safely recovered by this kernel.\n"
"Please recover the log on a kernel that supports the unknown features.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			return -EINVAL;
		}

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures
		 * with log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}
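/*
 * Illustrative sketch (not part of XFS, not compiled): the v5 feature check
 * above refuses recovery when the superblock advertises log-incompat bits
 * this kernel does not know about. The generic pattern is "reject if any
 * bit outside the known mask is set"; the masks, field, and can_recover()
 * below are invented for illustration.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define LOG_FEAT_KNOWN_MASK	0x00000003U	/* bits this code supports */
#define LOG_FEAT_UNKNOWN_MASK	(~LOG_FEAT_KNOWN_MASK)

static int can_recover(uint32_t sb_features_log_incompat)
{
	uint32_t unknown = sb_features_log_incompat & LOG_FEAT_UNKNOWN_MASK;

	if (unknown) {
		fprintf(stderr,
			"unknown incompatible log features (0x%x) enabled\n",
			unknown);
		return 0;	/* would return -EINVAL in the kernel */
	}
	return 1;
}

int main(void)
{
	printf("0x2 -> %d\n", can_recover(0x2));	/* known bit: ok */
	printf("0x8 -> %d\n", can_recover(0x8));	/* unknown: reject */
	return 0;
}
#endif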
/*
 * In the first part of recovery we replay inodes and buffers and build
 * up the list of extent free items which need to be processed.  Here
 * we process the extent free items and clean up the on disk unlinked
 * inode lists.  This is separated from the first part of recovery so
 * that the root and real-time bitmap inodes can be read in from disk in
 * between the two stages.  This is necessary so that we can free space
 * in the real-time portion of the file system.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the
	 * rest of recovery.  Start with completing all the extent
	 * free intent records and then process the unlinked inode
	 * lists.  At this point, we essentially run in normal mode
	 * except that we're still performing recovery actions
	 * rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_efis(log);
		if (error) {
			xfs_alert(log->l_mp, "Failed to recover EFIs");
			return error;
		}

		/*
		 * Sync the log to get all the EFIs out of the AIL.
		 * This isn't absolutely necessary, but it helps in
		 * case the unlink transactions would have problems
		 * pushing the EFIs out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
				log->l_mp->m_logname ? log->l_mp->m_logname
						     : "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}
#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and check that they
 * are consistent with the superblock counters.
 */
void
xlog_recover_check_summary(
	struct xlog	*log)
{
	xfs_mount_t	*mp;
	xfs_agf_t	*agfp;
	xfs_buf_t	*agfbp;
	xfs_buf_t	*agibp;
	xfs_agnumber_t	agno;
	__uint64_t	freeblks;
	__uint64_t	itotal;
	__uint64_t	ifree;
	int		error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			agfp = XFS_BUF_TO_AGF(agfbp);
			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = XFS_BUF_TO_AGI(agibp);

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);