/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
STATIC xfs_buf_t *xfs_trans_buf_item_match(xfs_trans_t *, xfs_buftarg_t *,
		xfs_daddr_t, int);
STATIC xfs_buf_t *xfs_trans_buf_item_match_all(xfs_trans_t *, xfs_buftarg_t *,
		xfs_daddr_t, int);
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use get_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
xfs_buf_t *
xfs_trans_get_buf(xfs_trans_t	*tp,
		  xfs_buftarg_t	*target_dev,
		  xfs_daddr_t	blkno,
		  int		len,
		  uint		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_get_flags(target_dev, blkno, len,
				       flags | BUF_BUSY);
		return bp;
	}
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
	} else {
		bp = xfs_trans_buf_item_match_all(tp, target_dev, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buftrace("TRANS GET RECUR SHUT", bp);
			XFS_BUF_SUPER_STALE(bp);
		}
		/*
		 * If the buffer is stale then it was binval'ed
		 * since last read.  This doesn't matter since the
		 * caller isn't allowed to use the data anyway.
		 */
		else if (XFS_BUF_ISSTALE(bp)) {
			xfs_buftrace("TRANS GET RECUR STALE", bp);
			ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
		}
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buftrace("TRANS GET RECUR", bp);
		xfs_buf_item_trace("GET RECUR", bip);
		return bp;
	}
	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_get_flags(target_dev, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!XFS_BUF_GETERROR(bp));

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buftrace("TRANS GET", bp);
	xfs_buf_item_trace("GET", bip);
	return bp;
}
/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL) {
		return (xfs_getsb(mp, flags));
	}
	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		xfs_buf_item_trace("GETSB RECUR", bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL) {
		return NULL;
	}

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, mp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buf_item_trace("GETSB", bip);
	return bp;
}
xfs_buftarg_t	*xfs_error_target;
int		xfs_do_error;
int		xfs_req_num;
int		xfs_error_mod = 33;
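/*
 * The variables above implement simple read error injection for
 * debugging: when xfs_do_error is set and the I/O is directed at
 * xfs_error_target, roughly one in every xfs_error_mod calls through
 * xfs_trans_read_buf() is failed with EIO (see the checks below).
 */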
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk.  If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * Use the fast path function xfs_trans_buf_item_match() or the buffer
 * cache routine incore_match() to find the buffer
 * if it is already owned by this transaction.
 *
 * If we don't already own the buffer, use read_buf() to get it.
 * If it doesn't yet have an associated xfs_buf_log_item structure,
 * then allocate one and add the item to this transaction.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len,
	uint		flags,
	xfs_buf_t	**bpp)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	if (flags == 0)
		flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
		if (!bp)
			return XFS_ERROR(ENOMEM);
		if ((bp != NULL) && (XFS_BUF_GETERROR(bp) != 0)) {
			xfs_ioerror_alert("xfs_trans_read_buf", mp,
					  bp, blkno);
			error = XFS_BUF_GETERROR(bp);
			xfs_buf_relse(bp);
			return error;
		}
		if (xfs_do_error && (bp != NULL)) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					printk("Returning error!\n");
					return XFS_ERROR(EIO);
				}
			}
		}
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp->t_items.lic_next == NULL) {
		bp = xfs_trans_buf_item_match(tp, target, blkno, len);
	} else {
		bp = xfs_trans_buf_item_match_all(tp, target, blkno, len);
	}
	if (bp != NULL) {
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
		ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
		ASSERT((XFS_BUF_ISERROR(bp)) == 0);
		if (!(XFS_BUF_ISDONE(bp))) {
			xfs_buftrace("READ_BUF_INCORE !DONE", bp);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			XFS_BUF_READ(bp);
			xfsbdstrat(tp->t_mountp, bp);
			xfs_iowait(bp);
			if (XFS_BUF_GETERROR(bp) != 0) {
				xfs_ioerror_alert("xfs_trans_read_buf", mp,
						  bp, blkno);
				error = XFS_BUF_GETERROR(bp);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most
				 * read errors.  Ones we can't are those
				 * that happen after the transaction's
				 * already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							XFS_METADATA_IO_ERROR);
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either.  Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			xfs_buftrace("READ_BUF_INCORE XFSSHUTDN", bp);
			*bpp = NULL;
			return XFS_ERROR(EIO);
		}

		bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		xfs_buf_item_trace("READ RECUR", bip);
		*bpp = bp;
		return 0;
	}
	/*
	 * We always specify the BUF_BUSY flag within a transaction so
	 * that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_read_flags(target, blkno, len, flags | BUF_BUSY);
	if (bp == NULL) {
		*bpp = NULL;
		return 0;
	}
	if (XFS_BUF_GETERROR(bp) != 0) {
		XFS_BUF_SUPER_STALE(bp);
		xfs_buftrace("READ ERROR", bp);
		error = XFS_BUF_GETERROR(bp);

		xfs_ioerror_alert("xfs_trans_read_buf", mp,
				  bp, blkno);
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, XFS_METADATA_IO_ERROR);
		xfs_buf_relse(bp);
		return error;
	}
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   XFS_METADATA_IO_ERROR);
				xfs_buf_relse(bp);
				printk("Returning error in trans!\n");
				return XFS_ERROR(EIO);
			}
		}
	}
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;
	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);

	/*
	 * Set the recursion count for the buffer within this transaction
	 * to 0.
	 */
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buftrace("TRANS READ", bp);
	xfs_buf_item_trace("READ", bip);
	*bpp = bp;
	return 0;
shutdown_abort:
	/*
	 * the theory here is that the buffer is good but we're
	 * bailing out because the filesystem is being forcibly
	 * shut down.  So we should leave the b_flags alone since
	 * the buffer's not staled and just get out.
	 */
	if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
		cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
	ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
				     (XFS_B_STALE|XFS_B_DELWRI));

	xfs_buftrace("READ_BUF XFSSHUTDN", bp);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return XFS_ERROR(EIO);
}
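/*
 * Typical use of xfs_trans_read_buf() (a rough sketch): read the
 * metadata block into the transaction and check the returned error
 * before touching the buffer.
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
 *				   blkno, len, 0, &bp);
 *	if (error)
 *		return error;
 *	... examine the buffer; call xfs_trans_log_buf() if it is dirtied ...
 */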
/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_t		*lip;
	xfs_log_item_desc_t	*lidp;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
		/*
		 * If there's a buf log item attached to the buffer,
		 * then let the AIL know that the buffer is being
		 * unlocked.
		 */
		if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
			lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
			if (lip->li_type == XFS_LI_BUF) {
				bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
				xfs_trans_unlocked_item(
						bip->bli_item.li_mountp,
						lip);
			}
		}
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	/*
	 * Find the item descriptor pointing to this buffer's
	 * log item.  It must be there.
	 */
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
	ASSERT(lidp != NULL);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		xfs_buf_item_trace("RELSE RECUR", bip);
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (lidp->lid_flags & XFS_LID_DIRTY) {
		xfs_buf_item_trace("RELSE DIRTY", bip);
		return;
	}

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("RELSE STALE", bip);
		return;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	xfs_buf_item_trace("RELSE", bip);

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_free_item(tp, lidp);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);
	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
		/***
		ASSERT(bp->b_pincount == 0);
		***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
		bip = NULL;
	}
	XFS_BUF_SET_FSPRIVATE2(bp, NULL);

	/*
	 * If we've still got a buf log item on the buffer, then
	 * tell the AIL that the buffer is being unlocked.
	 */
	if (bip != NULL) {
		xfs_trans_unlocked_item(bip->bli_item.li_mountp,
					(xfs_log_item_t *)bip);
	}

	xfs_buf_relse(bp);
	return;
}
/*
 * Add the locked buffer to the transaction.
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
void
xfs_trans_bjoin(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)bip);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	XFS_BUF_SET_FSPRIVATE2(bp, tp);

	xfs_buf_item_trace("BJOIN", bip);
}
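/*
 * xfs_trans_bjoin() is used for a buffer that was obtained outside the
 * transaction (for example, straight from the buffer cache) and now
 * needs to be tracked by it; afterwards the buffer behaves like one
 * returned by xfs_trans_get_buf().
 */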
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bip->bli_flags |= XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD", bip);
}
/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);
	bip->bli_flags &= ~XFS_BLI_HOLD;
	xfs_buf_item_trace("BHOLD RELEASE", bip);
}
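/*
 * A common pattern built on these calls (a sketch; the commit and the
 * follow-on transaction setup are the caller's business, not enforced
 * here) is to keep a buffer locked across a commit and rejoin it to
 * the next transaction:
 *
 *	xfs_trans_bhold(tp, bp);
 *	xfs_trans_commit(tp, ...);
 *	tp = <start a new transaction>;
 *	xfs_trans_bjoin(tp, bp);
 *
 * xfs_trans_bhold_release() cancels the hold when the caller decides,
 * before commit, that it no longer needs the buffer afterwards.
 */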
/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip;
	xfs_log_item_desc_t	*lidp;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
	ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
	       (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DELAYWRITE(bp);
	XFS_BUF_DONE(bp);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
	bip->bli_item.li_cb = (void(*)(xfs_buf_t *, xfs_log_item_t *))xfs_buf_iodone;

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again.  There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		xfs_buf_item_trace("BLOG UNSTALE", bip);
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->bli_format.blf_flags &= ~XFS_BLI_CANCEL;
	}

	lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
	ASSERT(lidp != NULL);

	tp->t_flags |= XFS_TRANS_DIRTY;
	lidp->lid_flags |= XFS_LID_DIRTY;
	lidp->lid_flags &= ~XFS_LID_BUF_STALE;
	bip->bli_flags |= XFS_BLI_LOGGED;
	xfs_buf_item_log(bip, first, last);
	xfs_buf_item_trace("BLOG", bip);
}
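/*
 * xfs_buf_item_log() records the first..last range in the buf log
 * format's blf_data_map bitmap, so only the dirtied portions of the
 * buffer (rounded out to the map's chunk granularity) have to be
 * copied into the log when the transaction commits.
 */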
/*
 * This is called to invalidate a buffer that is being used within
 * a transaction.  Typically this is because the blocks in the
 * buffer are being freed, so we need to prevent it from being
 * written out when we're done.  Allowing it to be written again
 * might overwrite data in the free blocks if they are reallocated
 * to a file.
 *
 * We prevent the buffer from being written out by clearing the
 * B_DELWRI flag.  We can't always
 * get rid of the buf log item at this point, though, because
 * the buffer may still be pinned by another transaction.  If that
 * is the case, then we'll wait until the buffer is committed to
 * disk for the last time (we can tell by the ref count) and
 * free it in xfs_buf_item_unpin().  Until it is cleaned up we
 * will keep the buffer locked so that the buffer and buf log item
 * are not reused.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
	ASSERT(lidp != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->bli_format.blf_flags & XFS_BLI_INODE_BUF));
		ASSERT(bip->bli_format.blf_flags & XFS_BLI_CANCEL);
		ASSERT(lidp->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		xfs_buftrace("XFS_BINVAL RECUR", bp);
		xfs_buf_item_trace("BINVAL RECUR", bip);
		return;
	}

	/*
	 * Clear the dirty bit in the buffer and set the STALE flag
	 * in the buf log item.  The STALE flag will be used in
	 * xfs_buf_item_unpin() to determine if it should clean up
	 * when the last reference to the buf item is given up.
	 * We set the XFS_BLI_CANCEL flag in the buf log format structure
	 * and log the buf item.  This will be used at recovery time
	 * to determine that copies of the buffer in the log before
	 * this should not be replayed.
	 * We mark the item descriptor and the transaction dirty so
	 * that we'll hold the buffer until after the commit.
	 *
	 * Since we're invalidating the buffer, we also clear the state
	 * about which parts of the buffer have been logged.  We also
	 * clear the flag indicating that this is an inode buffer since
	 * the data in the buffer will no longer be valid.
	 *
	 * We set the stale bit in the buffer as well since we're getting
	 * rid of it.
	 */
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_STALE(bp);
	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->bli_format.blf_flags &= ~XFS_BLI_INODE_BUF;
	bip->bli_format.blf_flags |= XFS_BLI_CANCEL;
	memset((char *)(bip->bli_format.blf_data_map), 0,
	       (bip->bli_format.blf_map_size * sizeof(uint)));
	lidp->lid_flags |= XFS_LID_DIRTY | XFS_LID_BUF_STALE;
	tp->t_flags |= XFS_TRANS_DIRTY;
	xfs_buftrace("XFS_BINVAL", bp);
	xfs_buf_item_trace("BINVAL", bip);
}
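/*
 * Invalidation is typically used when the blocks backing a piece of
 * metadata (a btree block being freed, for instance) go away within
 * the transaction; the cancel flag logged here is what keeps log
 * recovery from replaying older copies of the buffer over blocks that
 * may since have been reallocated.
 */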
/*
 * This call is used to indicate that the buffer contains on-disk
 * inodes which must be handled specially during recovery.  They
 * require special handling because only the di_next_unlinked from
 * the inodes in the buffer should be recovered.  The rest of the
 * data in the buffer is logged via the inodes themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the buffer's log
 * format structure so that we'll know what to do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_format.blf_flags |= XFS_BLI_INODE_BUF;
}
/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer.  This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery:
 * any replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = (void(*)(xfs_buf_t *, xfs_log_item_t *))
		xfs_buf_iodone;
}
/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}
/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots.  However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety.  (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag.)
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened.  We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	xfs_buf_log_item_t	*bip;

	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
	ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
	ASSERT(type == XFS_BLI_UDQUOT_BUF ||
	       type == XFS_BLI_PDQUOT_BUF ||
	       type == XFS_BLI_GDQUOT_BUF);

	bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_format.blf_flags |= type;
}
/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.  Only check the first, embedded
 * chunk, since we don't want to spend all day scanning large transactions.
 */
STATIC xfs_buf_t *
xfs_trans_buf_item_match(
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*blip;
	xfs_buf_t		*bp;
	int			i;

	bp = NULL;
	len = BBTOB(len);
	licp = &tp->t_items;
	if (!XFS_LIC_ARE_ALL_FREE(licp)) {
		for (i = 0; i < licp->lic_unused; i++) {
			/*
			 * Skip unoccupied slots.
			 */
			if (XFS_LIC_ISFREE(licp, i)) {
				continue;
			}

			lidp = XFS_LIC_SLOT(licp, i);
			blip = (xfs_buf_log_item_t *)lidp->lid_item;
			if (blip->bli_item.li_type != XFS_LI_BUF) {
				continue;
			}

			bp = blip->bli_buf;
			if ((XFS_BUF_TARGET(bp) == target) &&
			    (XFS_BUF_ADDR(bp) == blkno) &&
			    (XFS_BUF_COUNT(bp) == len)) {
				/*
				 * We found it.  Break out and
				 * return the pointer to the buffer.
				 */
				break;
			}
			bp = NULL;
		}
	}
	return bp;
}
/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.  Check all the chunks, we
 * want to be thorough.
 */
STATIC xfs_buf_t *
xfs_trans_buf_item_match_all(
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_buf_log_item_t	*blip;
	xfs_buf_t		*bp;
	int			i;

	bp = NULL;
	len = BBTOB(len);
	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
		if (XFS_LIC_ARE_ALL_FREE(licp)) {
			ASSERT(licp == &tp->t_items);
			ASSERT(licp->lic_next == NULL);
			return NULL;
		}
		for (i = 0; i < licp->lic_unused; i++) {
			/*
			 * Skip unoccupied slots.
			 */
			if (XFS_LIC_ISFREE(licp, i)) {
				continue;
			}

			lidp = XFS_LIC_SLOT(licp, i);
			blip = (xfs_buf_log_item_t *)lidp->lid_item;
			if (blip->bli_item.li_type != XFS_LI_BUF) {
				continue;
			}

			bp = blip->bli_buf;
			if ((XFS_BUF_TARGET(bp) == target) &&
			    (XFS_BUF_ADDR(bp) == blkno) &&
			    (XFS_BUF_COUNT(bp) == len)) {
				/*
				 * We found it.  Break out and
				 * return the pointer to the buffer.
				 */
				return bp;
			}
		}
	}
	return NULL;
}