xfs: kill xfs_read_buf()
fs/xfs/xfs_trans_buf.c
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
1da177e4 18#include "xfs.h"
a844f451 19#include "xfs_fs.h"
1da177e4 20#include "xfs_types.h"
a844f451 21#include "xfs_bit.h"
1da177e4 22#include "xfs_log.h"
a844f451 23#include "xfs_inum.h"
1da177e4 24#include "xfs_trans.h"
1da177e4
LT
25#include "xfs_sb.h"
26#include "xfs_ag.h"
1da177e4 27#include "xfs_mount.h"
a844f451
NS
28#include "xfs_bmap_btree.h"
29#include "xfs_alloc_btree.h"
30#include "xfs_ialloc_btree.h"
a844f451
NS
31#include "xfs_dinode.h"
32#include "xfs_inode.h"
33#include "xfs_buf_item.h"
1da177e4
LT
34#include "xfs_trans_priv.h"
35#include "xfs_error.h"
36#include "xfs_rw.h"
0b1b213f 37#include "xfs_trace.h"
1da177e4 38
/*
 * Check to see if a buffer matching the given parameters is already
 * a part of the given transaction.
 */
STATIC struct xfs_buf *
xfs_trans_buf_item_match(
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	int			len)
{
	struct xfs_log_item_desc *lidp;
	struct xfs_buf_log_item	*blip;

	len = BBTOB(len);
	list_for_each_entry(lidp, &tp->t_items, lid_trans) {
		blip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (blip->bli_item.li_type == XFS_LI_BUF &&
		    blip->bli_buf->b_target == target &&
		    XFS_BUF_ADDR(blip->bli_buf) == blkno &&
		    BBTOB(blip->bli_buf->b_length) == len)
			return blip->bli_buf;
	}

	return NULL;
}

/*
 * Add the locked buffer to the transaction.
 *
 * The buffer must be locked, and it cannot be associated with any
 * transaction.
 *
 * If the buffer does not yet have a buf log item associated with it,
 * then allocate one for it.  Then add the buf item to the transaction.
 */
STATIC void
_xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp,
	int			reset_recur)
{
	struct xfs_buf_log_item	*bip;

	ASSERT(bp->b_transp == NULL);

	/*
	 * The xfs_buf_log_item pointer is stored in b_fsprivate.  If
	 * it doesn't have one yet, then allocate one and initialize it.
	 * The checks to see if one is there are in xfs_buf_item_init().
	 */
	xfs_buf_item_init(bp, tp->t_mountp);
	bip = bp->b_fspriv;
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
	if (reset_recur)
		bip->bli_recur = 0;

	/*
	 * Take a reference for this transaction on the buf item.
	 */
	atomic_inc(&bip->bli_refcount);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &bip->bli_item);

	/*
	 * Initialize b_fsprivate2 so we can find it with incore_match()
	 * in xfs_trans_get_buf() and friends above.
	 */
	bp->b_transp = tp;

}

void
xfs_trans_bjoin(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	_xfs_trans_bjoin(tp, bp, 0);
	trace_xfs_trans_bjoin(bp->b_fspriv);
}

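/*
 * Illustrative sketch (not part of the original file): a caller that has
 * already obtained and locked a buffer outside transaction context can hand
 * it to a transaction with xfs_trans_bjoin().  The names tp, mp, blkno and
 * len are assumed to be set up by the hypothetical caller.
 *
 *	bp = xfs_buf_get(mp->m_ddev_targp, blkno, len, XBF_MAPPED);
 *	if (bp)
 *		xfs_trans_bjoin(tp, bp);	(tp now owns the locked buffer)
 */
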
/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * get_buf() call.
 */
xfs_buf_t *
xfs_trans_get_buf(xfs_trans_t	*tp,
		  xfs_buftarg_t	*target_dev,
		  xfs_daddr_t	blkno,
		  int		len,
		  uint		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	if (flags == 0)
		flags = XBF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL)
		return xfs_buf_get(target_dev, blkno, len,
				   flags | XBF_DONT_BLOCK);

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
			xfs_buf_stale(bp);
			XFS_BUF_DONE(bp);
		}

		ASSERT(bp->b_transp == tp);
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_get_buf_recur(bip);
		return (bp);
	}

	/*
	 * We always specify the XBF_DONT_BLOCK flag within a transaction
	 * so that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
	if (bp == NULL) {
		return NULL;
	}

	ASSERT(!bp->b_error);

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_get_buf(bp->b_fspriv);
	return (bp);
}

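/*
 * Illustrative sketch (not part of the original file): the usual pattern for
 * a transactional metadata update is to get the buffer, modify it, and then
 * log the modified byte range so it reaches disk via the log.  tp, mp, blkno
 * and len are assumed to come from the hypothetical caller.
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, len, 0);
 *	if (bp == NULL)
 *		return XFS_ERROR(ENOMEM);
 *	... initialise or modify the buffer contents ...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(len) - 1);
 */
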
/*
 * Get and lock the superblock buffer of this file system for the
 * given transaction.
 *
 * We don't need to use incore_match() here, because the superblock
 * buffer is a private buffer which we keep a pointer to in the
 * mount structure.
 */
xfs_buf_t *
xfs_trans_getsb(xfs_trans_t	*tp,
		struct xfs_mount *mp,
		int		flags)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to just trying to lock the superblock buffer
	 * if tp is NULL.
	 */
	if (tp == NULL) {
		return (xfs_getsb(mp, flags));
	}

	/*
	 * If the superblock buffer already has this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the buffer to the caller.
	 */
	bp = mp->m_sb_bp;
	if (bp->b_transp == tp) {
		bip = bp->b_fspriv;
		ASSERT(bip != NULL);
		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		bip->bli_recur++;
		trace_xfs_trans_getsb_recur(bip);
		return (bp);
	}

	bp = xfs_getsb(mp, flags);
	if (bp == NULL)
		return NULL;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_getsb(bp->b_fspriv);
	return (bp);
}

#ifdef DEBUG
xfs_buftarg_t *xfs_error_target;
int	xfs_do_error;
int	xfs_req_num;
int	xfs_error_mod = 33;
#endif

/*
 * Get and lock the buffer for the caller if it is not already
 * locked within the given transaction.  If it has not yet been
 * read in, read it from disk. If it is already locked
 * within the transaction and already read in, just increment its
 * lock recursion count and return a pointer to it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * read_buf() call.
 */
int
xfs_trans_read_buf(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_buftarg_t	*target,
	xfs_daddr_t	blkno,
	int		len,
	uint		flags,
	xfs_buf_t	**bpp)
{
	xfs_buf_t		*bp;
	xfs_buf_log_item_t	*bip;
	int			error;

	*bpp = NULL;

	if (flags == 0)
		flags = XBF_MAPPED;

	/*
	 * Default to a normal get_buf() call if the tp is NULL.
	 */
	if (tp == NULL) {
		bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
		if (!bp)
			return (flags & XBF_TRYLOCK) ?
					EAGAIN : XFS_ERROR(ENOMEM);

		if (bp->b_error) {
			error = bp->b_error;
			xfs_buf_ioerror_alert(bp, __func__);
			XFS_BUF_UNDONE(bp);
			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
			return error;
		}
#ifdef DEBUG
		if (xfs_do_error) {
			if (xfs_error_target == target) {
				if (((xfs_req_num++) % xfs_error_mod) == 0) {
					xfs_buf_relse(bp);
					xfs_debug(mp, "Returning error!");
					return XFS_ERROR(EIO);
				}
			}
		}
#endif
		if (XFS_FORCED_SHUTDOWN(mp))
			goto shutdown_abort;
		*bpp = bp;
		return 0;
	}

	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	bp = xfs_trans_buf_item_match(tp, target, blkno, len);
	if (bp != NULL) {
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_fspriv != NULL);
		ASSERT(!bp->b_error);
		if (!(XFS_BUF_ISDONE(bp))) {
			trace_xfs_trans_read_buf_io(bp, _RET_IP_);
			ASSERT(!XFS_BUF_ISASYNC(bp));
			XFS_BUF_READ(bp);
			xfsbdstrat(tp->t_mountp, bp);
			error = xfs_buf_iowait(bp);
			if (error) {
				xfs_buf_ioerror_alert(bp, __func__);
				xfs_buf_relse(bp);
				/*
				 * We can gracefully recover from most read
				 * errors. Ones we can't are those that happen
				 * after the transaction's already dirty.
				 */
				if (tp->t_flags & XFS_TRANS_DIRTY)
					xfs_force_shutdown(tp->t_mountp,
							SHUTDOWN_META_IO_ERROR);
				return error;
			}
		}
		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			*bpp = NULL;
			return XFS_ERROR(EIO);
		}


		bip = bp->b_fspriv;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		*bpp = bp;
		return 0;
	}

	/*
	 * We always specify the XBF_DONT_BLOCK flag within a transaction
	 * so that get_buf does not try to push out a delayed write buffer
	 * which might cause another transaction to take place (if the
	 * buffer was delayed alloc).  Such recursive transactions can
	 * easily deadlock with our current transaction as well as cause
	 * us to run out of stack space.
	 */
	bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
	if (bp == NULL) {
		*bpp = NULL;
		return (flags & XBF_TRYLOCK) ?
					0 : XFS_ERROR(ENOMEM);
	}
	if (bp->b_error) {
		error = bp->b_error;
		xfs_buf_stale(bp);
		XFS_BUF_DONE(bp);
		xfs_buf_ioerror_alert(bp, __func__);
		if (tp->t_flags & XFS_TRANS_DIRTY)
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		xfs_buf_relse(bp);
		return error;
	}
#ifdef DEBUG
	if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) {
		if (xfs_error_target == target) {
			if (((xfs_req_num++) % xfs_error_mod) == 0) {
				xfs_force_shutdown(tp->t_mountp,
						   SHUTDOWN_META_IO_ERROR);
				xfs_buf_relse(bp);
				xfs_debug(mp, "Returning trans error!");
				return XFS_ERROR(EIO);
			}
		}
	}
#endif
	if (XFS_FORCED_SHUTDOWN(mp))
		goto shutdown_abort;

	_xfs_trans_bjoin(tp, bp, 1);
	trace_xfs_trans_read_buf(bp->b_fspriv);

	*bpp = bp;
	return 0;

shutdown_abort:
	trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
	xfs_buf_relse(bp);
	*bpp = NULL;
	return XFS_ERROR(EIO);
}


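/*
 * Illustrative sketch (not part of the original file): reading metadata
 * inside a transaction returns an error code and the buffer separately, so a
 * hypothetical caller would typically do:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, len,
 *				   0, &bp);
 *	if (error)
 *		return error;		(bp is NULL on failure)
 *	... examine or modify bp, then xfs_trans_log_buf() if it was dirtied ...
 */
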
/*
 * Release the buffer bp which was previously acquired with one of the
 * xfs_trans_... buffer allocation routines if the buffer has not
 * been modified within this transaction.  If the buffer is modified
 * within this transaction, do decrement the recursion count but do
 * not release the buffer even if the count goes to 0.  If the buffer is not
 * modified within the transaction, decrement the recursion count and
 * release the buffer if the recursion count goes to 0.
 *
 * If the buffer is to be released and it was not modified before
 * this transaction began, then free the buf_log_item associated with it.
 *
 * If the transaction pointer is NULL, make this just a normal
 * brelse() call.
 */
void
xfs_trans_brelse(xfs_trans_t	*tp,
		 xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip;

	/*
	 * Default to a normal brelse() call if the tp is NULL.
	 */
	if (tp == NULL) {
		ASSERT(bp->b_transp == NULL);
		xfs_buf_relse(bp);
		return;
	}

	ASSERT(bp->b_transp == tp);
	bip = bp->b_fspriv;
	ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_brelse(bip);

	/*
	 * If the release is just for a recursive lock,
	 * then decrement the count and return.
	 */
	if (bip->bli_recur > 0) {
		bip->bli_recur--;
		return;
	}

	/*
	 * If the buffer is dirty within this transaction, we can't
	 * release it until we commit.
	 */
	if (bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY)
		return;

	/*
	 * If the buffer has been invalidated, then we can't release
	 * it until the transaction commits to disk unless it is re-dirtied
	 * as part of this transaction.  This prevents us from pulling
	 * the item from the AIL before we should.
	 */
	if (bip->bli_flags & XFS_BLI_STALE)
		return;

	ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));

	/*
	 * Free up the log item descriptor tracking the released item.
	 */
	xfs_trans_del_item(&bip->bli_item);

	/*
	 * Clear the hold flag in the buf log item if it is set.
	 * We wouldn't want the next user of the buffer to
	 * get confused.
	 */
	if (bip->bli_flags & XFS_BLI_HOLD) {
		bip->bli_flags &= ~XFS_BLI_HOLD;
	}

	/*
	 * Drop our reference to the buf log item.
	 */
	atomic_dec(&bip->bli_refcount);

	/*
	 * If the buf item is not tracking data in the log, then
	 * we must free it before releasing the buffer back to the
	 * free pool.  Before releasing the buffer to the free pool,
	 * clear the transaction pointer in b_fsprivate2 to dissolve
	 * its relation to this transaction.
	 */
	if (!xfs_buf_item_dirty(bip)) {
/***
		ASSERT(bp->b_pincount == 0);
***/
		ASSERT(atomic_read(&bip->bli_refcount) == 0);
		ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));
		ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
		xfs_buf_item_relse(bp);
	}

	bp->b_transp = NULL;
	xfs_buf_relse(bp);
}

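/*
 * Illustrative sketch (not part of the original file): a buffer that was only
 * examined, not dirtied, should be handed back early so it does not stay
 * locked for the rest of the transaction:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, blkno, len,
 *				   0, &bp);
 *	if (error)
 *		return error;
 *	... read what we need from bp ...
 *	xfs_trans_brelse(tp, bp);	(safe because bp was never logged)
 */
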
/*
 * Mark the buffer as not needing to be unlocked when the buf item's
 * IOP_UNLOCK() routine is called.  The buffer must already be locked
 * and associated with the given transaction.
 */
/* ARGSUSED */
void
xfs_trans_bhold(xfs_trans_t	*tp,
		xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_HOLD;
	trace_xfs_trans_bhold(bip);
}

/*
 * Cancel the previous buffer hold request made on this buffer
 * for this transaction.
 */
void
xfs_trans_bhold_release(xfs_trans_t	*tp,
			xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
	ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT(bip->bli_flags & XFS_BLI_HOLD);

	bip->bli_flags &= ~XFS_BLI_HOLD;
	trace_xfs_trans_bhold_release(bip);
}

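/*
 * Illustrative sketch (not part of the original file): xfs_trans_bhold() is
 * typically used to keep a buffer locked across a commit so it can be joined
 * to a follow-up transaction; xfs_trans_bhold_release() cancels the hold if
 * the caller changes its mind before committing.  tp2 is a hypothetical
 * second transaction and error handling is omitted.
 *
 *	xfs_trans_bhold(tp, bp);	(bp stays locked over the commit)
 *	error = xfs_trans_commit(tp, 0);
 *	... set up and reserve tp2 ...
 *	xfs_trans_bjoin(tp2, bp);	(hand the still-locked bp to tp2)
 */
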
/*
 * This is called to mark bytes first through last inclusive of the given
 * buffer as needing to be logged when the transaction is committed.
 * The buffer must already be associated with the given transaction.
 *
 * First and last are numbers relative to the beginning of this buffer,
 * so the first byte in the buffer is numbered 0 regardless of the
 * value of b_blkno.
 */
void
xfs_trans_log_buf(xfs_trans_t	*tp,
		  xfs_buf_t	*bp,
		  uint		first,
		  uint		last)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(first <= last && last < BBTOB(bp->b_length));
	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.  See xfs_buf_attach_iodone() for more details
	 * on li_cb and xfs_buf_iodone_callbacks().
	 * If we end up aborting this transaction, we trap this buffer
	 * inside the b_bdstrat callback so that this won't get written to
	 * disk.
	 */
	XFS_BUF_DONE(bp);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	bp->b_iodone = xfs_buf_iodone_callbacks;
	bip->bli_item.li_cb = xfs_buf_iodone;

	trace_xfs_trans_log_buf(bip);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again. There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(XFS_BUF_ISSTALE(bp));
		XFS_BUF_UNSTALE(bp);
		bip->bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	bip->bli_flags |= XFS_BLI_LOGGED;
	xfs_buf_item_log(bip, first, last);
}


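/*
 * Illustrative sketch (not part of the original file): callers log only the
 * byte range they actually changed, which keeps the logged region (and hence
 * the log traffic) small.  "offset" and "length" are hypothetical values.
 *
 *	... modify "length" bytes at byte "offset" within the buffer ...
 *	xfs_trans_log_buf(tp, bp, offset, offset + length - 1);
 */
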
/*
 * Invalidate a buffer that is being used within a transaction.
 *
 * Typically this is because the blocks in the buffer are being freed, so we
 * need to prevent it from being written out when we're done.  Allowing it
 * to be written again might overwrite data in the free blocks if they are
 * reallocated to a file.
 *
 * We prevent the buffer from being written out by marking it stale.  We can't
 * get rid of the buf log item at this point because the buffer may still be
 * pinned by another transaction.  If that is the case, then we'll wait until
 * the buffer is committed to disk for the last time (we can tell by the ref
 * count) and free it in xfs_buf_item_unpin().  Until that happens we will
 * keep the buffer locked so that the buffer and buf log item are not reused.
 *
 * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
 * the buf item.  This will be used at recovery time to determine that copies
 * of the buffer in the log before this should not be replayed.
 *
 * We mark the item descriptor and the transaction dirty so that we'll hold
 * the buffer until after the commit.
 *
 * Since we're invalidating the buffer, we also clear the state about which
 * parts of the buffer have been logged.  We also clear the flag indicating
 * that this is an inode buffer since the data in the buffer will no longer
 * be valid.
 *
 * We set the stale bit in the buffer as well since we're getting rid of it.
 */
void
xfs_trans_binval(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_trans_binval(bip);

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * If the buffer is already invalidated, then
		 * just return.
		 */
		ASSERT(XFS_BUF_ISSTALE(bp));
		ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
		ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_INODE_BUF));
		ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(bip->bli_item.li_desc->lid_flags & XFS_LID_DIRTY);
		ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
		return;
	}

	xfs_buf_stale(bp);

	bip->bli_flags |= XFS_BLI_STALE;
	bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
	bip->bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
	bip->bli_format.blf_flags |= XFS_BLF_CANCEL;
	memset((char *)(bip->bli_format.blf_data_map), 0,
	      (bip->bli_format.blf_map_size * sizeof(uint)));
	bip->bli_item.li_desc->lid_flags |= XFS_LID_DIRTY;
	tp->t_flags |= XFS_TRANS_DIRTY;
}

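/*
 * Illustrative sketch (not part of the original file): when a transaction
 * frees the extent backing a metadata buffer, it invalidates the buffer so
 * the stale contents are never written back over the freed blocks:
 *
 *	... xfs_free_extent()/xfs_bmap_finish() style block freeing ...
 *	xfs_trans_binval(tp, bp);	(bp must already belong to tp)
 */
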
/*
 * This call is used to indicate that the buffer contains on-disk inodes which
 * must be handled specially during recovery.  They require special handling
 * because only the di_next_unlinked from the inodes in the buffer should be
 * recovered.  The rest of the data in the buffer is logged via the inodes
 * themselves.
 *
 * All we do is set the XFS_BLI_INODE_BUF flag in the item's flags so it can be
 * transferred to the buffer's log format structure so that we'll know what to
 * do at recovery time.
 */
void
xfs_trans_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_BUF;
}

/*
 * This call is used to indicate that the buffer is going to
 * be staled and was an inode buffer. This means it gets
 * special processing during unpin - where any inodes
 * associated with the buffer should be removed from the AIL.
 * There is also special processing during recovery: any replay
 * of the inodes in the buffer needs to be prevented, as the
 * buffer may have been reused.
 */
void
xfs_trans_stale_inode_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_STALE_INODE;
	bip->bli_item.li_cb = xfs_buf_iodone;
}

/*
 * Mark the buffer as being one which contains newly allocated
 * inodes.  We need to make sure that even if this buffer is
 * relogged as an 'inode buf' we still recover all of the inode
 * images in the face of a crash.  This works in coordination with
 * xfs_buf_item_committed() to ensure that the buffer remains in the
 * AIL at its original location even after it has been relogged.
 */
/* ARGSUSED */
void
xfs_trans_inode_alloc_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
}


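/*
 * Illustrative sketch (not part of the original file): inode cluster buffers
 * are flagged so that log recovery treats them correctly.  A hypothetical
 * inode allocation path would initialise the cluster and then mark it;
 * cluster_blkno and cluster_len are assumed values.
 *
 *	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, cluster_blkno,
 *			       cluster_len, 0);
 *	... write the on-disk inode cores into bp ...
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(cluster_len) - 1);
 *	xfs_trans_inode_alloc_buf(tp, bp);
 */
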
/*
 * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
 * dquots. However, unlike in inode buffer recovery, dquot buffers get
 * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
 * The only thing that makes dquot buffers different from regular
 * buffers is that we must not replay dquot bufs when recovering
 * if a _corresponding_ quotaoff has happened. We also have to distinguish
 * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
 * can be turned off independently.
 */
/* ARGSUSED */
void
xfs_trans_dquot_buf(
	xfs_trans_t	*tp,
	xfs_buf_t	*bp,
	uint		type)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);
	ASSERT(type == XFS_BLF_UDQUOT_BUF ||
	       type == XFS_BLF_PDQUOT_BUF ||
	       type == XFS_BLF_GDQUOT_BUF);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	bip->bli_format.blf_flags |= type;
}
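
/*
 * Illustrative sketch (not part of the original file): the quota code tags a
 * dquot cluster buffer with its type before logging it, so recovery can skip
 * it if the matching quotaoff has been logged.  The buffer setup is assumed
 * to have been done by the hypothetical caller.
 *
 *	xfs_trans_dquot_buf(tp, bp, XFS_BLF_UDQUOT_BUF);
 *	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
 */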