/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_discard.h"
/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
	struct xlog	*log)
{
	struct xlog_ticket *tic;

	tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
				KM_SLEEP|KM_NOFS);
	tic->t_trans_type = XFS_TRANS_CHECKPOINT;

	/*
	 * set the current reservation to zero so we know to steal the basic
	 * transaction overhead reservation from the first transaction commit.
	 */
	tic->t_curr_res = 0;
	return tic;
}
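/*
 * Illustrative sketch, not part of this file: how the zeroed reservation
 * above drives the "steal on first commit" logic in
 * xlog_cil_insert_items(). The flow restates code found later in this
 * file; it adds no new behaviour.
 */
#if 0
	tic = xlog_cil_ticket_alloc(log);		/* t_curr_res == 0 */
	/* ... first transaction commit into this checkpoint ... */
	if (ctx->ticket->t_curr_res == 0) {
		/* first commit seen: steal the checkpoint header
		 * reservation from the committing transaction */
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}
#endif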
/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
	struct xlog	*log)
{
	log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
	log->l_cilp->xc_ctx->sequence = 1;
	log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle,
							log->l_curr_block);
}
static int
xlog_cil_lv_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	int	index;
	char	*ptr;

	/* format new vectors into array */
	lip->li_ops->iop_format(lip, lv->lv_iovecp);

	/* copy data into existing array */
	ptr = lv->lv_buf;
	for (index = 0; index < lv->lv_niovecs; index++) {
		struct xfs_log_iovec *vec = &lv->lv_iovecp[index];

		memcpy(ptr, vec->i_addr, vec->i_len);
		vec->i_addr = ptr;
		ptr += vec->i_len;
	}

	/*
	 * some size calculations for log vectors over-estimate, so the caller
	 * doesn't know the amount of space actually used by the item. Return
	 * the byte count to the caller so they can check and store it
	 * appropriately.
	 */
	return ptr - lv->lv_buf;
}
/*
 * Format log items into a flat write buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and formats the vector for the item into the buffer.
 * The buffer is then attached to the log item, and the log item is inserted
 * into the Committed Item List for tracking until the next checkpoint is
 * written out.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static struct xfs_log_vec *
xlog_cil_prepare_log_vecs(
	struct xfs_trans	*tp)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_log_vec	*prev_lv = NULL;
	struct xfs_log_vec	*ret_lv = NULL;

	/* Bail out if we didn't find a log item.  */
	if (list_empty(&tp->t_items)) {
		ASSERT(0);
		return NULL;
	}

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;
		struct xfs_log_vec *lv;
		int	niovecs = 0;
		int	nbytes = 0;
		int	buf_size;
		bool	ordered = false;

		/* Skip items which aren't dirty in this transaction. */
		if (!(lidp->lid_flags & XFS_LID_DIRTY))
			continue;

		/* get number of vecs and size of data to be stored */
		lip->li_ops->iop_size(lip, &niovecs, &nbytes);

		/* Skip items that do not have any vectors for writing */
		if (!niovecs)
			continue;

		/*
		 * Ordered items need to be tracked but we do not wish to write
		 * them. We need a logvec to track the object, but we do not
		 * need an iovec or buffer to be allocated for copying data.
		 */
		if (niovecs == XFS_LOG_VEC_ORDERED) {
			ordered = true;
			niovecs = 0;
			nbytes = 0;
		}

		/* calc buffer size */
		buf_size = sizeof(struct xfs_log_vec) + nbytes +
				niovecs * sizeof(struct xfs_log_iovec);

		/* allocate new data chunk */
		lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
		lv->lv_item = lip;
		lv->lv_size = buf_size;
		lv->lv_niovecs = niovecs;
		if (ordered) {
			/* track as an ordered logvec */
			ASSERT(lip->li_lv == NULL);
			lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
			goto next;
		}

		/* The allocated iovec region lies beyond the log vector. */
		lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];

		/* The allocated data region lies beyond the iovec region */
		lv->lv_buf = (char *)lv + buf_size - nbytes;

		lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
		ASSERT(lv->lv_buf_len <= nbytes);
next:
		if (!ret_lv)
			ret_lv = lv;
		else
			prev_lv->lv_next = lv;
		prev_lv = lv;
	}

	return ret_lv;
}
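/*
 * Illustrative sketch, not part of this file: the single kmem_zalloc()
 * above packs three regions back to back:
 *
 *	+--------------------+--------------------------+----------------+
 *	| struct xfs_log_vec | niovecs * xfs_log_iovec  | nbytes of data |
 *	+--------------------+--------------------------+----------------+
 *	lv                   lv->lv_iovecp = &lv[1]     lv->lv_buf
 *
 * Worked example with assumed sizes (sizeof(struct xfs_log_vec) == 48,
 * sizeof(struct xfs_log_iovec) == 16), niovecs = 2 and nbytes = 128:
 */
#if 0
	buf_size = 48 + 128 + 2 * 16;			/* 208 bytes total */
	lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];	/* offset 48 */
	lv->lv_buf = (char *)lv + 208 - 128;		/* offset 80 */
#endif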
/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
	struct xlog		*log,
	struct xfs_log_vec	*lv,
	int			*len,
	int			*diff_iovecs)
{
	struct xfs_log_vec	*old = lv->lv_item->li_lv;

	if (old) {
		/* existing lv on log item, space used is a delta */
		ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) ||
			old->lv_buf_len == XFS_LOG_VEC_ORDERED);

		/*
		 * If the new item is ordered, keep the old one that is already
		 * tracking dirty or ordered regions
		 */
		if (lv->lv_buf_len == XFS_LOG_VEC_ORDERED) {
			ASSERT(!lv->lv_buf);
			kmem_free(lv);
			return;
		}

		*len += lv->lv_buf_len - old->lv_buf_len;
		*diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
		kmem_free(old);
	} else {
		/* new lv, must pin the log item */
		ASSERT(!lv->lv_item->li_lv);

		if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
			*len += lv->lv_buf_len;
			*diff_iovecs += lv->lv_niovecs;
		}
		IOP_PIN(lv->lv_item);
	}

	/* attach new log vector to log item */
	lv->lv_item->li_lv = lv;

	/*
	 * If this is the first time the item is being committed to the
	 * CIL, store the sequence number on the log item so we can
	 * tell in future commits whether this is the first checkpoint
	 * the item is being committed into.
	 */
	if (!lv->lv_item->li_seq)
		lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
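/*
 * Illustrative sketch, not part of this file: relogging an item that
 * already has a formatted lv only accounts for its growth. An item
 * previously formatted into 2 iovecs / 256 bytes that now needs
 * 3 iovecs / 384 bytes contributes deltas of:
 */
#if 0
	*len += 384 - 256;	/* 128 bytes of new log space */
	*diff_iovecs += 3 - 2;	/* one extra iovec (one more ophdr) */
#endif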
/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
	struct xlog		*log,
	struct xfs_log_vec	*log_vector,
	struct xlog_ticket	*ticket)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx = cil->xc_ctx;
	struct xfs_log_vec	*lv;
	int			len = 0;
	int			diff_iovecs = 0;
	int			iclog_space;

	ASSERT(log_vector);

	/*
	 * Do all the accounting aggregation and switching of log vectors
	 * around in a separate loop to the insertion of items into the CIL.
	 * Then we can do a separate loop to update the CIL within a single
	 * lock/unlock pair. This reduces the number of round trips on the CIL
	 * lock from O(nr_logvectors) to O(1) and greatly reduces the overall
	 * hold time for the transaction commit.
	 *
	 * If this is the first time the item is being placed into the CIL in
	 * this context, pin it so it can't be written to disk until the CIL is
	 * flushed to the iclog and the iclog written to disk.
	 *
	 * We can do this safely because the context can't checkpoint until we
	 * are done so it doesn't matter exactly how we update the CIL.
	 */
	spin_lock(&cil->xc_cil_lock);
	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;

		ASSERT(lv->lv_item->li_lv || list_empty(&lv->lv_item->li_cil));
		lv->lv_next = NULL;

		/*
		 * xfs_cil_prepare_item() may free the lv, so move the item on
		 * the CIL first.
		 */
		list_move_tail(&lv->lv_item->li_cil, &cil->xc_cil);
		xfs_cil_prepare_item(log, lv, &len, &diff_iovecs);
		lv = next;
	}

	/* account for space used by new iovec headers  */
	len += diff_iovecs * sizeof(xlog_op_header_t);
	ctx->nvecs += diff_iovecs;

	/*
	 * Now transfer enough transaction reservation to the context ticket
	 * for the checkpoint. The context ticket is special - the unit
	 * reservation has to grow as well as the current reservation as we
	 * steal from tickets so we can correctly determine the space used
	 * during the transaction commit.
	 */
	if (ctx->ticket->t_curr_res == 0) {
		/* first commit in checkpoint, steal the header reservation */
		ASSERT(ticket->t_curr_res >= ctx->ticket->t_unit_res + len);
		ctx->ticket->t_curr_res = ctx->ticket->t_unit_res;
		ticket->t_curr_res -= ctx->ticket->t_unit_res;
	}

	/* do we need space for more log record headers? */
	iclog_space = log->l_iclog_size - log->l_iclog_hsize;
	if (len > 0 && (ctx->space_used / iclog_space !=
				(ctx->space_used + len) / iclog_space)) {
		int hdrs;

		hdrs = (len + iclog_space - 1) / iclog_space;
		/* need to take into account split region headers, too */
		hdrs *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
		ctx->ticket->t_unit_res += hdrs;
		ctx->ticket->t_curr_res += hdrs;
		ticket->t_curr_res -= hdrs;
		ASSERT(ticket->t_curr_res >= len);
	}
	ticket->t_curr_res -= len;
	ctx->space_used += len;

	spin_unlock(&cil->xc_cil_lock);
}
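/*
 * Worked example, illustrative only: assume 32k iclogs with a 512 byte
 * header, so iclog_space = 32768 - 512 = 32256. A commit that adds
 * len = 40000 bytes and crosses an iclog boundary steals
 * hdrs = (40000 + 32256 - 1) / 32256 = 2 extra record headers, each
 * costing l_iclog_hsize + sizeof(struct xlog_op_header) to cover the
 * split region ophdrs as well.
 */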
STATIC void
xlog_cil_free_logvec(
	struct xfs_log_vec	*log_vector)
{
	struct xfs_log_vec	*lv;

	for (lv = log_vector; lv; ) {
		struct xfs_log_vec *next = lv->lv_next;
		kmem_free(lv);
		lv = next;
	}
}
/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
STATIC void
xlog_cil_committed(
	void	*args,
	int	abort)
{
	struct xfs_cil_ctx	*ctx = args;
	struct xfs_mount	*mp = ctx->cil->xc_log->l_mp;

	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
					ctx->start_lsn, abort);

	xfs_extent_busy_sort(&ctx->busy_extents);
	xfs_extent_busy_clear(mp, &ctx->busy_extents,
			     (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

	spin_lock(&ctx->cil->xc_cil_lock);
	list_del(&ctx->committing);
	spin_unlock(&ctx->cil->xc_cil_lock);

	xlog_cil_free_logvec(ctx->lv_chain);

	if (!list_empty(&ctx->busy_extents)) {
		ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

		xfs_discard_extents(mp, &ctx->busy_extents);
		xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
	}

	kmem_free(ctx);
}
/*
 * Push the Committed Item List to the log. If @push_seq flag is zero, then it
 * is a background flush and so we can choose to ignore it. Otherwise, if the
 * current sequence is the same as @push_seq we need to do a flush. If
 * @push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * @push_seq is a value rather than a flag because that allows us to do an
 * unlocked check of the sequence number for a match. Hence we can allow log
 * forces to run racily and not issue pushes for the same sequence twice. If we
 * get a race between multiple pushes for the same sequence they will block on
 * the first one and then abort, hence avoiding needless pushes.
 */
STATIC int
xlog_cil_push(
	struct xlog		*log)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_log_vec	*lv;
	struct xfs_cil_ctx	*ctx;
	struct xfs_cil_ctx	*new_ctx;
	struct xlog_in_core	*commit_iclog;
	struct xlog_ticket	*tic;
	int			num_iovecs;
	int			error = 0;
	struct xfs_trans_header thdr;
	struct xfs_log_iovec	lhdr;
	struct xfs_log_vec	lvhdr = { NULL };
	xfs_lsn_t		commit_lsn;
	xfs_lsn_t		push_seq;

	if (!cil)
		return 0;

	new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_SLEEP|KM_NOFS);
	new_ctx->ticket = xlog_cil_ticket_alloc(log);

	down_write(&cil->xc_ctx_lock);
	ctx = cil->xc_ctx;

	spin_lock(&cil->xc_cil_lock);
	push_seq = cil->xc_push_seq;
	ASSERT(push_seq <= ctx->sequence);

	/*
	 * Check if we've anything to push. If there is nothing, then we don't
	 * move on to a new sequence number and so we have to be able to push
	 * this sequence again later.
	 */
	if (list_empty(&cil->xc_cil)) {
		cil->xc_push_seq = 0;
		spin_unlock(&cil->xc_cil_lock);
		goto out_skip;
	}
	spin_unlock(&cil->xc_cil_lock);

	/* check for a previously pushed sequence */
	if (push_seq < cil->xc_ctx->sequence)
		goto out_skip;

	/*
	 * pull all the log vectors off the items in the CIL, and
	 * remove the items from the CIL. We don't need the CIL lock
	 * here because it's only needed on the transaction commit
	 * side which is currently locked out by the flush lock.
	 */
	lv = NULL;
	num_iovecs = 0;
	while (!list_empty(&cil->xc_cil)) {
		struct xfs_log_item	*item;

		item = list_first_entry(&cil->xc_cil,
					struct xfs_log_item, li_cil);
		list_del_init(&item->li_cil);
		if (!ctx->lv_chain)
			ctx->lv_chain = item->li_lv;
		else
			lv->lv_next = item->li_lv;
		lv = item->li_lv;
		item->li_lv = NULL;
		num_iovecs += lv->lv_niovecs;
	}

	/*
	 * initialise the new context and attach it to the CIL. Then attach
	 * the current context to the CIL committing list so it can be found
	 * during log forces to extract the commit lsn of the sequence that
	 * needs to be forced.
	 */
	INIT_LIST_HEAD(&new_ctx->committing);
	INIT_LIST_HEAD(&new_ctx->busy_extents);
	new_ctx->sequence = ctx->sequence + 1;
	new_ctx->cil = cil;
	cil->xc_ctx = new_ctx;

	/*
	 * mirror the new sequence into the cil structure so that we can do
	 * unlocked checks against the current sequence in log forces without
	 * risking dereferencing a freed context pointer.
	 */
	cil->xc_current_sequence = new_ctx->sequence;

	/*
	 * The switch is now done, so we can drop the context lock and move out
	 * of a shared context. We can't just go straight to the commit record,
	 * though - we need to synchronise with previous and future commits so
	 * that the commit records are correctly ordered in the log to ensure
	 * that we process items during log IO completion in the correct order.
	 *
	 * For example, if we get an EFI in one checkpoint and the EFD in the
	 * next (e.g. due to log forces), we do not want the checkpoint with
	 * the EFD to be committed before the checkpoint with the EFI. Hence
	 * we must strictly order the commit records of the checkpoints so
	 * that: a) the checkpoint callbacks are attached to the iclogs in the
	 * correct order; and b) the checkpoints are replayed in correct order
	 * in log recovery.
	 *
	 * Hence we need to add this context to the committing context list so
	 * that higher sequences will wait for us to write out a commit record
	 * before they do.
	 */
	spin_lock(&cil->xc_cil_lock);
	list_add(&ctx->committing, &cil->xc_committing);
	spin_unlock(&cil->xc_cil_lock);
	up_write(&cil->xc_ctx_lock);

	/*
	 * Build a checkpoint transaction header and write it to the log to
	 * begin the transaction. We need to account for the space used by the
	 * transaction header here as it is not accounted for in xlog_write().
	 *
	 * The LSN we need to pass to the log items on transaction commit is
	 * the LSN reported by the first log vector write. If we use the commit
	 * record lsn then we can move the tail beyond the grant write head.
	 */
	tic = ctx->ticket;
	thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
	thdr.th_type = XFS_TRANS_CHECKPOINT;
	thdr.th_tid = tic->t_tid;
	thdr.th_num_items = num_iovecs;
	lhdr.i_addr = &thdr;
	lhdr.i_len = sizeof(xfs_trans_header_t);
	lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
	tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

	lvhdr.lv_niovecs = 1;
	lvhdr.lv_iovecp = &lhdr;
	lvhdr.lv_next = ctx->lv_chain;

	error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0);
	if (error)
		goto out_abort_free_ticket;

	/*
	 * now that we've written the checkpoint into the log, strictly
	 * order the commit records so replay will get them in the right order.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
		/*
		 * Higher sequences will wait for this one so skip them.
		 * Don't wait for our own sequence, either.
		 */
		if (new_ctx->sequence >= ctx->sequence)
			continue;
		if (!new_ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);

	/* xfs_log_done always frees the ticket on error. */
	commit_lsn = xfs_log_done(log->l_mp, tic, &commit_iclog, 0);
	if (commit_lsn == -1)
		goto out_abort;

	/* attach all the transactions w/ busy extents to iclog */
	ctx->log_cb.cb_func = xlog_cil_committed;
	ctx->log_cb.cb_arg = ctx;
	error = xfs_log_notify(log->l_mp, commit_iclog, &ctx->log_cb);
	if (error)
		goto out_abort;

	/*
	 * now the checkpoint commit is complete and we've attached the
	 * callbacks to the iclog we can assign the commit LSN to the context
	 * and wake up anyone who is waiting for the commit to complete.
	 */
	spin_lock(&cil->xc_cil_lock);
	ctx->commit_lsn = commit_lsn;
	wake_up_all(&cil->xc_commit_wait);
	spin_unlock(&cil->xc_cil_lock);

	/* release the hounds! */
	return xfs_log_release_iclog(log->l_mp, commit_iclog);

out_skip:
	up_write(&cil->xc_ctx_lock);
	xfs_log_ticket_put(new_ctx->ticket);
	kmem_free(new_ctx);
	return 0;

out_abort_free_ticket:
	xfs_log_ticket_put(tic);
out_abort:
	xlog_cil_committed(ctx, XFS_LI_ABORTED);
	return XFS_ERROR(EIO);
}
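/*
 * Illustrative sketch, not part of this file: why @push_seq is a value
 * rather than a flag. Racing pushes of the same sequence serialise on
 * xc_ctx_lock; the loser finds the context already switched out and
 * backs off via the same check used above:
 */
#if 0
	/* racing pusher, after taking xc_ctx_lock exclusively */
	if (push_seq < cil->xc_ctx->sequence)
		goto out_skip;	/* sequence already pushed: harmless no-op */
#endif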
static void
xlog_cil_push_work(
	struct work_struct	*work)
{
	struct xfs_cil		*cil = container_of(work, struct xfs_cil,
							xc_push_work);
	xlog_cil_push(cil->xc_log);
}
/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
STATIC void
xlog_cil_push_background(
	struct xlog	*log)
{
	struct xfs_cil	*cil = log->l_cilp;

	/*
	 * The cil won't be empty because we are called while holding the
	 * context lock so whatever we added to the CIL will still be there
	 */
	ASSERT(!list_empty(&cil->xc_cil));

	/*
	 * don't do a background push if we haven't used up all the
	 * space available yet.
	 */
	if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log))
		return;

	spin_lock(&cil->xc_cil_lock);
	if (cil->xc_push_seq < cil->xc_current_sequence) {
		cil->xc_push_seq = cil->xc_current_sequence;
		queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
	}
	spin_unlock(&cil->xc_cil_lock);
}
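/*
 * Illustrative note, not part of this file: XLOG_CIL_SPACE_LIMIT() is
 * defined in xfs_log_priv.h; assuming it evaluates to one eighth of the
 * log size, a 128MB log queues a background push once the current
 * context has used roughly 16MB - well under the half-the-log hard
 * limit described above.
 */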
STATIC void
xlog_cil_push_foreground(
	struct xlog	*log,
	xfs_lsn_t	push_seq)
{
	struct xfs_cil	*cil = log->l_cilp;

	if (!cil)
		return;

	ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

	/* start on any pending background push to minimise wait time on it */
	flush_work(&cil->xc_push_work);

	/*
	 * If the CIL is empty or we've already pushed the sequence then
	 * there's no work we need to do.
	 */
	spin_lock(&cil->xc_cil_lock);
	if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
		spin_unlock(&cil->xc_cil_lock);
		return;
	}

	cil->xc_push_seq = push_seq;
	spin_unlock(&cil->xc_cil_lock);

	/* do the push now */
	xlog_cil_push(log);
}
/*
 * Commit a transaction with the given vector to the Committed Item List.
 *
 * To do this, we need to format the item, pin it in memory if required and
 * account for the space used by the transaction. Once we have done that we
 * need to release the unused reservation for the transaction, attach the
 * transaction to the checkpoint context so we carry the busy extents through
 * to checkpoint completion, and then unlock all the items in the transaction.
 *
 * Called with the context lock already held in read mode to lock out
 * background commit, returns without it held once background commits are
 * allowed again.
 */
*mp
,
725 struct xfs_trans
*tp
,
726 xfs_lsn_t
*commit_lsn
,
729 struct xlog
*log
= mp
->m_log
;
731 struct xfs_log_vec
*log_vector
;
733 if (flags
& XFS_TRANS_RELEASE_LOG_RES
)
734 log_flags
= XFS_LOG_REL_PERM_RESERV
;
737 * Do all the hard work of formatting items (including memory
738 * allocation) outside the CIL context lock. This prevents stalling CIL
739 * pushes when we are low on memory and a transaction commit spends a
740 * lot of time in memory reclaim.
742 log_vector
= xlog_cil_prepare_log_vecs(tp
);
746 /* lock out background commit */
747 down_read(&log
->l_cilp
->xc_ctx_lock
);
749 *commit_lsn
= log
->l_cilp
->xc_ctx
->sequence
;
751 /* xlog_cil_insert_items() destroys log_vector list */
752 xlog_cil_insert_items(log
, log_vector
, tp
->t_ticket
);
754 /* check we didn't blow the reservation */
755 if (tp
->t_ticket
->t_curr_res
< 0)
756 xlog_print_tic_res(log
->l_mp
, tp
->t_ticket
);
758 /* attach the transaction to the CIL if it has any busy extents */
759 if (!list_empty(&tp
->t_busy
)) {
760 spin_lock(&log
->l_cilp
->xc_cil_lock
);
761 list_splice_init(&tp
->t_busy
,
762 &log
->l_cilp
->xc_ctx
->busy_extents
);
763 spin_unlock(&log
->l_cilp
->xc_cil_lock
);
766 tp
->t_commit_lsn
= *commit_lsn
;
767 xfs_log_done(mp
, tp
->t_ticket
, NULL
, log_flags
);
768 xfs_trans_unreserve_and_mod_sb(tp
);
771 * Once all the items of the transaction have been copied to the CIL,
772 * the items can be unlocked and freed.
774 * This needs to be done before we drop the CIL context lock because we
775 * have to update state in the log items and unlock them before they go
776 * to disk. If we don't, then the CIL checkpoint can race with us and
777 * we can run checkpoint completion before we've updated and unlocked
778 * the log items. This affects (at least) processing of stale buffers,
781 xfs_trans_free_items(tp
, *commit_lsn
, 0);
783 xlog_cil_push_background(log
);
785 up_read(&log
->l_cilp
->xc_ctx_lock
);
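/*
 * Illustrative caller sketch, not part of this file: the shape of the
 * delayed logging commit path in xfs_trans_commit(), simplified.
 */
#if 0
	xfs_lsn_t	commit_lsn = -1;
	int		error;

	error = xfs_log_commit_cil(mp, tp, &commit_lsn,
				   XFS_TRANS_RELEASE_LOG_RES);
	/* on return the items are unlocked, the reservation is released
	 * and commit_lsn holds the checkpoint sequence number */
#endif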
/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence
 * number given. Hence the only time we will trigger a push here is
 * if the push sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_lsn(
	struct xlog	*log,
	xfs_lsn_t	sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
	xlog_cil_push_foreground(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}
	spin_unlock(&cil->xc_cil_lock);
	return commit_lsn;
}
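/*
 * Illustrative caller sketch, not part of this file: _xfs_log_force_lsn()
 * (simplified) uses the returned commit lsn to decide whether an iclog
 * flush is still required.
 */
#if 0
	lsn = xlog_cil_force_lsn(log, lsn);
	if (lsn == NULLCOMMITLSN)
		return 0;	/* nothing committing at that sequence */
	/* ... otherwise force the iclog containing lsn to disk ... */
#endif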
/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item *lip)
{
	struct xfs_cil_ctx *ctx;

	if (list_empty(&lip->li_cil))
		return false;

	ctx = lip->li_mountp->m_log->l_cilp->xc_ctx;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	if (XFS_LSN_CMP(lip->li_seq, ctx->sequence) != 0)
		return false;

	return true;
}
/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog	*log)
{
	struct xfs_cil	*cil;
	struct xfs_cil_ctx *ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL);
	if (!cil)
		return ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}
void
xlog_cil_destroy(
	struct xlog	*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}