/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
kmem_zone_t	*xfs_trans_zone;
kmem_zone_t	*xfs_log_item_desc_zone;
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount	*mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}
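/*
 * A caller that wants one of these precomputed reservations simply
 * passes it to xfs_trans_reserve().  As a rough sketch (not code from
 * this file), the timestamp-update path does something along the
 * lines of:
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 */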
/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type)
{
	xfs_trans_t	*tp;

	sb_start_intwrite(mp->m_super);
	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
	return tp;
}
xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t	*mp,
	uint		type,
	xfs_km_flags_t	memflags)
{
	xfs_trans_t	*tp;

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);
	return tp;
}
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans	*tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t	*tp)
{
	xfs_trans_t	*ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
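/*
 * Note that xfs_trans_dup() hands the caller an extra reference on the
 * log ticket (via xfs_log_ticket_get() above).  Once the old
 * transaction has been committed, users of the pair are expected to
 * drop it again, roughly (sketch only; see xfs_trans_roll() below for
 * the real sequence):
 *
 *	ntp = xfs_trans_dup(tp);
 *	error = xfs_trans_commit(tp, 0);
 *	...
 *	xfs_log_ticket_put(ntp->t_ticket);
 */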
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * Setting XFS_TRANS_PERM_LOG_RES in resp->tr_logflags requests a permanent
 * log reservation, which is used by long running transactions.  If any one
 * of the reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	struct xfs_trans	*tp,
	struct xfs_trans_res	*resp,
	uint			blocks,
	uint			rtextents)
{
	int		error = 0;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}
	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool	permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent, tp->t_type);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}
	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;
	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		int	log_flags;

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		/* return the unused block reservation to the free pool */
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
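/*
 * Putting the pieces above together, a minimal caller looks roughly
 * like the following sketch (the transaction type and reservation are
 * only examples, and error handling is trimmed):
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
 *	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata, nblks, 0);
 *	if (error) {
 *		xfs_trans_cancel(tp, 0);
 *		return error;
 *	}
 *	... join locked items, modify and log them ...
 *	error = xfs_trans_commit(tp, 0);
 */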
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t	*tp,
	uint		field,
	int64_t		delta)
{
	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t	*mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
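/*
 * For example, an allocation path that consumed "len" blocks out of
 * the transaction's block reservation would record it as (sketch):
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -((int64_t)len));
 *
 * The negative delta is charged against tp->t_blk_res through
 * t_blk_res_used above, and with lazy superblock counters enabled it
 * does not dirty the superblock buffer at all.
 */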
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t	*tp)
{
	xfs_dsb_t	*sbp;
	xfs_buf_t	*bp;
	int		whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);
	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));
	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
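/*
 * The "contiguous" case above works because sb_icount, sb_ifree,
 * sb_fdblocks and sb_frextents sit next to each other in xfs_dsb_t, so
 * logging the byte range from offsetof(sb_icount) through
 * offsetof(sb_frextents) + sizeof(sb_frextents) - 1 covers all four
 * counters in a single region.
 */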
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that that has already been
 * done.
 *
 * This is done efficiently with a single call to xfs_mod_incore_sb_batch().
 * However, we have to ensure that we only modify each superblock field once
 * because the application of the delta values may not be atomic.  That can
 * lead to ENOSPC races occurring if we have two separate modifications of the
 * free space counter to put back the entire reservation and then take away
 * what we used.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock.  In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	xfs_trans_t	*tp)
{
	xfs_mod_sb_t	msb[9];	/* If you add cases, add entries */
	xfs_mod_sb_t	*msbp;
	xfs_mount_t	*mp = tp->t_mountp;
	/* REFERENCED */
	int		error;
	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t		blkdelta = 0;
	int64_t		rtxdelta = 0;
	int64_t		idelta = 0;
	int64_t		ifreedelta = 0;

	msbp = msb;
	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}
	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	/* apply remaining deltas */
	if (rtxdelta) {
		error = xfs_mod_frextents(mp, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}
	if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
		if (tp->t_dblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_DBLOCKS;
			msbp->msb_delta = tp->t_dblocks_delta;
			msbp++;
		}
		if (tp->t_agcount_delta != 0) {
			msbp->msb_field = XFS_SBS_AGCOUNT;
			msbp->msb_delta = tp->t_agcount_delta;
			msbp++;
		}
		if (tp->t_imaxpct_delta != 0) {
			msbp->msb_field = XFS_SBS_IMAX_PCT;
			msbp->msb_delta = tp->t_imaxpct_delta;
			msbp++;
		}
		if (tp->t_rextsize_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSIZE;
			msbp->msb_delta = tp->t_rextsize_delta;
			msbp++;
		}
		if (tp->t_rbmblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBMBLOCKS;
			msbp->msb_delta = tp->t_rbmblocks_delta;
			msbp++;
		}
		if (tp->t_rblocks_delta != 0) {
			msbp->msb_field = XFS_SBS_RBLOCKS;
			msbp->msb_delta = tp->t_rblocks_delta;
			msbp++;
		}
		if (tp->t_rextents_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTENTS;
			msbp->msb_delta = tp->t_rextents_delta;
			msbp++;
		}
		if (tp->t_rextslog_delta != 0) {
			msbp->msb_field = XFS_SBS_REXTSLOG;
			msbp->msb_delta = tp->t_rextslog_delta;
			msbp++;
		}
	}

	/*
	 * If we need to change anything, do it.
	 */
	if (msbp > msb) {
		error = xfs_mod_incore_sb_batch(tp->t_mountp, msb,
			(uint)(msbp - msb), rsvd);
		if (error)
			goto out_undo_frextents;
	}

	return;
out_undo_frextents:
	if (rtxdelta)
		xfs_mod_frextents(mp, -rtxdelta);
out_undo_ifree:
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}
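/*
 * The "modify each field only once" rule in the comment above matters
 * because counter updates are not atomic with respect to other
 * reservations.  A sketch of the race it avoids: undoing a 100 block
 * reservation of which 40 were used as two separate steps,
 *
 *	xfs_mod_fdblocks(mp, 100, rsvd);	(put back the reservation)
 *	xfs_mod_fdblocks(mp, -40, rsvd);	(take away what was used)
 *
 * briefly exposes 60 blocks too many, and the second call can fail
 * with ENOSPC if another thread grabs them in between; folding both
 * into a single +60 delta cannot.
 */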
/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}
/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item	*lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}
/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans	*tp,
	xfs_lsn_t		commit_lsn,
	int			flags)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item	*lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (flags & XFS_TRANS_ABORT)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}
STATIC void
xfs_log_item_batch_insert(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		commit_lsn)
{
	int	i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail		*ailp,
	struct xfs_log_vec	*log_vector,
	xfs_lsn_t		commit_lsn,
	int			aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec	*lv;
	struct xfs_ail_cursor	cur;
	int			i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);
	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item	*lip = lv->lv_item;
		xfs_lsn_t		item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}
		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}
	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
int
xfs_trans_commit(
	struct xfs_trans	*tp,
	uint			flags)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_lsn_t		commit_lsn = -1;
	int			error = 0;
	int			log_flags = 0;
	int			sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * Determine whether this commit is releasing a permanent
	 * log reservation or not.
	 */
	if (flags & XFS_TRANS_RELEASE_LOG_RES) {
		ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
		log_flags = XFS_LOG_REL_PERM_RESERV;
	}

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, flags);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0);
	xfs_trans_free(tp);

	XFS_STATS_INC(xs_trans_empty);
	return error;
}
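/*
 * Because the items are unlocked and the transaction is freed even on
 * failure, callers do not cancel after a failed commit.  A typical
 * call site looks roughly like (sketch, with a hypothetical cleanup
 * label):
 *
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 *	if (error)
 *		goto out_release_resources;	(no xfs_trans_cancel() here)
 */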
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t		*tp,
	int			flags)
{
	int			log_flags;
	xfs_mount_t		*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!(flags & XFS_TRANS_ABORT) && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction  */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, flags);
	xfs_trans_free(tp);
}
/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with XFS_TRANS_RELEASE_LOG_RES, but we still want to let
 * chunks of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
xfs_trans_roll(
	struct xfs_trans	**tpp,
	struct xfs_inode	*dp)
{
	struct xfs_trans	*trans;
	struct xfs_trans_res	tres;
	int			error;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;
	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = xfs_trans_commit(trans, 0);
	if (error)
		return error;

	trans = *tpp;

	/*
	 * transaction commit worked ok so we can drop the extra ticket
	 * reference that we gained in xfs_trans_dup()
	 */
	xfs_log_ticket_put(trans->t_ticket);

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);

	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	if (error)
		return error;

	xfs_trans_ijoin(trans, dp, 0);
	return 0;
}
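/*
 * A typical user keeps rolling until its work is done, with the same
 * inode joined across each step; roughly (sketch):
 *
 *	while (more extents to free) {
 *		... free a batch of extents, dirtying tp ...
 *		error = xfs_trans_roll(&tp, ip);
 *		if (error)
 *			break;
 *	}
 *
 * This is how long running operations such as truncate bound the
 * amount of change carried by any single permanent transaction.
 */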