xfs: global error sign conversion
deliverable/linux.git: fs/xfs/xfs_trans_buf.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"

/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_buf_log_item	*blip;
	int			len = 0;
	int			i;

	for (i = 0; i < nmaps; i++)
		len += map[i].bm_len;

	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		blip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
		    blip->bli_buf->b_length == len) {
			ASSERT(blip->bli_buf->b_map_count == nmaps);
			return blip->bli_buf;
		}
	}

	return NULL;
}
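
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * describe a discontiguous (multi-segment) buffer with an xfs_buf_map
 * array before handing it to xfs_trans_get_buf_map() below.  The match
 * above keys on the target, the first segment's bm_bn and the summed
 * bm_len of all segments.  "tp", "target" and "daddr" are assumed to be
 * provided by the caller; the block numbers are made up.
 */
#if 0	/* example only */
	struct xfs_buf_map	map[2] = {
		{ .bm_bn = daddr,	.bm_len = 4 },	/* first segment */
		{ .bm_bn = daddr + 32,	.bm_len = 4 },	/* second segment */
	};
	struct xfs_buf		*bp;

	bp = xfs_trans_get_buf_map(tp, target, map, 2, 0);
#endif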

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it. Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate. If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_fspriv;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);

	/*
	 * Initialize b_transp so we can find this buffer with
	 * xfs_trans_buf_item_match() in xfs_trans_get_buf() and friends below.
	 */
	bp->b_transp = tp;

}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_fspriv);
}

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction. If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
struct xfs_buf *
xfs_trans_get_buf_map(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (!tp)
		return xfs_buf_get_map(target, map, nmaps, flags);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked. In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			XFS_BUF_DONE(bp);
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return bp;
	}

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_fspriv);
	return bp;
}
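
/*
 * Illustrative sketch, not part of the original file: getting a buffer
 * inside a transaction.  The first call locks the buffer and joins it to
 * the transaction; a repeated call for the same range only bumps
 * bli_recur and must be balanced by an extra xfs_trans_brelse().  The
 * variables "tp", "target", "blkno" and "numblks" are assumptions.
 */
#if 0	/* example only */
	struct xfs_buf_map	map = { .bm_bn = blkno, .bm_len = numblks };
	struct xfs_buf		*bp, *bp2;

	bp = xfs_trans_get_buf_map(tp, target, &map, 1, 0);
	if (!bp)
		return -ENOMEM;			/* hypothetical caller policy */

	bp2 = xfs_trans_get_buf_map(tp, target, &map, 1, 0);
	ASSERT(bp2 == bp);			/* recursion, not a new buffer */
	xfs_trans_brelse(tp, bp2);		/* drops bli_recur back to 0 */
#endif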

/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use xfs_trans_buf_item_match() here, because the
 * superblock buffer is a private buffer which we keep a pointer to in
 * the mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL)
		return xfs_getsb(mp, flags);

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked. In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return bp;
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_fspriv);
	return bp;
}

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int	xfs_do_error;
int	xfs_req_num;
int	xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction. If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	*bpp = NULL;
	if (!tp) {
		bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
		if (!bp)
			return (flags & XBF_TRYLOCK) ?
					-EAGAIN : -ENOMEM;

		if (bp->b_error) {
			error = bp->b_error;
			xfs_buf_ioerror_alert(bp, __func__);
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);

			/* bad CRC means corrupted metadata */
			if (error == -EFSBADCRC)
				error = -EFSCORRUPTED;
			return error;
		}
#ifdef DEBUG
		if (xfs_do_error) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					xfs_debug(mp, "Returning error!");
					return -EIO;
				}
			}
		}
#endif
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked. If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_fspriv != NULL);
		ASSERT(!bp->b_error);
		if (!(XFS_BUF_ISDONE(bp))) {
			trace_xfs_trans_read_buf_io(bp, _RET_IP_);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			ASSERT(bp->b_iodone == NULL);
			XFS_BUF_READ(bp);
			bp->b_ops = ops;

			/*
			 * XXX(hch): clean up the error handling here to be less
			 * of a mess..
			 */
			if (XFS_FORCED_SHUTDOWN(mp)) {
				trace_xfs_bdstrat_shut(bp, _RET_IP_);
				xfs_bioerror_relse(bp);
			} else {
				xfs_buf_iorequest(bp);
			}

			error = xfs_buf_iowait(bp);
			if (error) {
				xfs_buf_ioerror_alert(bp, __func__);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most read
				 * errors. Ones we can't are those that happen
				 * after the transaction's already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							SHUTDOWN_META_IO_ERROR);
				/* bad CRC means corrupted metadata */
				if (error == -EFSBADCRC)
					error = -EFSCORRUPTED;
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			*bpp = NULL;
			return -EIO;
		}


		bip = bp->b_fspriv;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
	if (bp == NULL) {
		*bpp = NULL;
		return (flags & XBF_TRYLOCK) ?
					0 : -ENOMEM;
	}
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		xfs_buf_ioerror_alert(bp, __func__);
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);

		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}
#ifdef DEBUG
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				xfs_debug(mp, "Returning trans error!");
				return -EIO;
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_read_buf(bp->b_fspriv);

	*bpp = bp;
	return 0;

shutdown_abort:
	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return -EIO;
}
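
/*
 * Illustrative sketch, not part of the original file: reading metadata
 * within a transaction.  After the global error sign conversion the
 * return value is a negative errno (e.g. -EIO, -EFSCORRUPTED) and can be
 * passed straight up.  "tp", "blkno", "numblks" and "ops" (a buffer
 * verifier) are assumptions; mp->m_ddev_targp is the data device target.
 */
#if 0	/* example only */
	struct xfs_buf_map	map = { .bm_bn = blkno, .bm_len = numblks };
	struct xfs_buf		*bp;
	int			error;

	error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, &map, 1,
				       0, &bp, ops);
	if (error)
		return error;	/* already negative, no sign flipping needed */

	/* ... modify bp, then xfs_trans_log_buf() or xfs_trans_brelse() ... */
#endif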

/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction. If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0. If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_fspriv;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction. This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool. Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}
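
/*
 * Illustrative sketch, not part of the original file: the read-only access
 * pattern.  If nothing in the buffer was logged, xfs_trans_brelse() drops
 * the buffer out of the transaction immediately; if the lookup was
 * recursive it only decrements bli_recur.  "tp", "target", "map" and "ops"
 * are assumptions carried over from the examples above.
 */
#if 0	/* example only */
	error = xfs_trans_read_buf_map(mp, tp, target, &map, 1, 0, &bp, ops);
	if (error)
		return error;

	/* ... inspect the on-disk structure, log nothing ... */

	xfs_trans_brelse(tp, bp);	/* buffer unlocked and released */
#endif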

/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * iop_unlock() routine is called. The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}
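
/*
 * Illustrative sketch, not part of the original file: keeping a buffer
 * locked across a transaction commit.  xfs_trans_bhold() stops the commit
 * from unlocking the buffer, and once a follow-up transaction exists the
 * buffer is joined to it with xfs_trans_bjoin().  xfs_trans_commit() and
 * "new_tp" are assumptions from the surrounding transaction code.
 */
#if 0	/* example only */
	xfs_trans_bhold(tp, bp);
	error = xfs_trans_commit(tp, 0);
	if (error)
		return error;

	tp = new_tp;			/* hypothetical follow-up transaction */
	xfs_trans_bjoin(tp, bp);	/* bp is still locked, b_transp == NULL */
#endif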

/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk. See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DONE(bp);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	trace_xfs_trans_log_buf(bip);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again. There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;

	/*
	 * If we have an ordered buffer we are not logging any dirty range but
	 * it still needs to be marked dirty and marked as logged.
	 */
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;
	if (!(bip->bli_flags & XFS_BLI_ORDERED))
		xfs_buf_item_log(bip, first, last);
}
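
/*
 * Illustrative sketch, not part of the original file: logging only the
 * bytes that were changed.  Offsets are relative to the start of the
 * buffer and "last" is inclusive, hence the "- 1".  The on-disk structure
 * and its "counter" field are made-up assumptions; bp->b_addr is the
 * buffer's mapped address.
 */
#if 0	/* example only */
	struct xfs_example_hdr	*hdr = bp->b_addr;	/* hypothetical layout */

	hdr->counter++;
	xfs_trans_log_buf(tp, bp,
			  offsetof(struct xfs_example_hdr, counter),
			  offsetof(struct xfs_example_hdr, counter) +
				sizeof(hdr->counter) - 1);
#endif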


/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done. Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale. We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction. If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin(). Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item. This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged. We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;
	int			i;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
	bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
	for (i = 0; i < bip->bli_format_count; i++) {
		memset(bip->bli_formats[i].blf_data_map, 0,
		       (bip->bli_formats[i].blf_map_size * sizeof(uint)));
	}
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	tp->t_flags |= XFS_TRANS_DIRTY;
}
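
/*
 * Illustrative sketch, not part of the original file: invalidating the
 * buffer over an extent that this transaction frees, so the stale
 * contents are neither written back nor replayed by log recovery.
 * "tp", "target" and "map" are assumptions.
 */
#if 0	/* example only */
	bp = xfs_trans_get_buf_map(tp, target, &map, 1, 0);
	if (bp)
		xfs_trans_binval(tp, bp);
#endif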

/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery. They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered. The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the items flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer. This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from ail.
 * There is also special processing during recovery,
 * any replay of the inodes in the buffer needs to be
 * prevented as the buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes. We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash. This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
}

/*
 * Mark the buffer as ordered for this transaction. This means
 * that the contents of the buffer are not recorded in the transaction
 * but it is tracked in the AIL as though it was. This allows us
 * to record logical changes in transactions rather than the physical
 * changes we make to the buffer without changing writeback ordering
 * constraints of metadata buffers.
 */
void
xfs_trans_ordered_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_ORDERED;
	trace_xfs_buf_item_ordered(bip);
}

/*
 * Set the type of the buffer for log recovery so that it can correctly identify
 * and hence attach the correct buffer ops to the buffer after replay.
 */
void
xfs_trans_buf_set_type(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	enum xfs_blft		type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	if (!tp)
		return;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	xfs_blft_to_flags(&bip->__bli_format, type);
}
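
/*
 * Illustrative sketch, not part of the original file: tagging a freshly
 * initialised buffer so log recovery can attach the right buffer ops when
 * it replays it.  XFS_BLFT_DINO_BUF is the same type value the inode
 * buffer helpers above use; "tp", "target" and "map" are assumptions.
 */
#if 0	/* example only */
	bp = xfs_trans_get_buf_map(tp, target, &map, 1, 0);
	/* ... initialise the new inode cluster in bp ... */
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
#endif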

void
xfs_trans_buf_copy_type(
	struct xfs_buf		*dst_bp,
	struct xfs_buf		*src_bp)
{
	struct xfs_buf_log_item	*sbip = src_bp->b_fspriv;
	struct xfs_buf_log_item	*dbip = dst_bp->b_fspriv;
	enum xfs_blft		type;

	type = xfs_blft_from_flags(&sbip->__bli_format);
	xfs_blft_to_flags(&dbip->__bli_format, type);
}

/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	struct xfs_buf_log_item	*bip = bp->b_fspriv;

	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);

	bip->__bli_format.blf_flags |= type;

	switch (type) {
	case XFS_BLF_UDQUOT_BUF:
		type = XFS_BLFT_UDQUOT_BUF;
		break;
	case XFS_BLF_PDQUOT_BUF:
		type = XFS_BLFT_PDQUOT_BUF;
		break;
	case XFS_BLF_GDQUOT_BUF:
		type = XFS_BLFT_GDQUOT_BUF;
		break;
	default:
		type = XFS_BLFT_UNKNOWN_BUF;
		break;
	}

	xfs_trans_buf_set_type(tp, bp, type);
}
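
/*
 * Illustrative sketch, not part of the original file: marking a buffer of
 * user dquots.  XFS_BLF_UDQUOT_BUF is logged in the buf log format and
 * mapped to XFS_BLFT_UDQUOT_BUF above so recovery can tell user, project
 * and group dquot buffers apart.  "tp" and "bp" are assumptions.
 */
#if 0	/* example only */
	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
#endif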