xfs: decouple log and transaction headers
fs/xfs/xfs_bmap_util.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        return (XFS_IS_REALTIME_INODE(ip) ? \
                 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
                 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
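
/*
 * The asymmetry above: realtime extents live on the separate realtime
 * device, which is a flat address space, so a realtime fsb converts to a
 * daddr with a plain block-to-basic-block shift.  Data device fsbs encode
 * an AG number and offset, so they need XFS_FSB_TO_DADDR to resolve the
 * AG component first.
 */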

/*
 * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
 * caller.  Frees all the extents that need freeing, which must be done
 * last due to locking considerations.  We never free any extents in
 * the first transaction.
 *
 * On return, *committed is set to 1 if the given transaction was
 * committed and a new one started, and to 0 otherwise.
 */
int                                             /* error */
xfs_bmap_finish(
        xfs_trans_t             **tp,           /* transaction pointer addr */
        xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
        int                     *committed)     /* xact committed or not */
{
        xfs_efd_log_item_t      *efd;           /* extent free data */
        xfs_efi_log_item_t      *efi;           /* extent free intention */
        int                     error;          /* error return value */
        xfs_bmap_free_item_t    *free;          /* free extent item */
        struct xfs_trans_res    tres;           /* new log reservation */
        xfs_mount_t             *mp;            /* filesystem mount structure */
        xfs_bmap_free_item_t    *next;          /* next item on free list */
        xfs_trans_t             *ntp;           /* new transaction pointer */

        ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
        if (flist->xbf_count == 0) {
                *committed = 0;
                return 0;
        }
        ntp = *tp;
        efi = xfs_trans_get_efi(ntp, flist->xbf_count);
        for (free = flist->xbf_first; free; free = free->xbfi_next)
                xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
                        free->xbfi_blockcount);

        tres.tr_logres = ntp->t_log_res;
        tres.tr_logcount = ntp->t_log_count;
        tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
        ntp = xfs_trans_dup(*tp);
        error = xfs_trans_commit(*tp, 0);
        *tp = ntp;
        *committed = 1;
        /*
         * We have a new transaction, so we should return committed=1,
         * even though we're returning an error.
         */
        if (error)
                return error;

        /*
         * transaction commit worked ok so we can drop the extra ticket
         * reference that we gained in xfs_trans_dup()
         */
        xfs_log_ticket_put(ntp->t_ticket);

        error = xfs_trans_reserve(ntp, &tres, 0, 0);
        if (error)
                return error;
        efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
        for (free = flist->xbf_first; free != NULL; free = next) {
                next = free->xbfi_next;
                if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
                                free->xbfi_blockcount))) {
                        /*
                         * The bmap free list will be cleaned up at a
                         * higher level.  The EFI will be canceled when
                         * this transaction is aborted.
                         * Need to force shutdown here to make sure it
                         * happens, since this transaction may not be
                         * dirty yet.
                         */
                        mp = ntp->t_mountp;
                        if (!XFS_FORCED_SHUTDOWN(mp))
                                xfs_force_shutdown(mp,
                                                   (error == EFSCORRUPTED) ?
                                                   SHUTDOWN_CORRUPT_INCORE :
                                                   SHUTDOWN_META_IO_ERROR);
                        return error;
                }
                xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
                        free->xbfi_blockcount);
                xfs_bmap_del_free(flist, NULL, free);
        }
        return 0;
}
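
/*
 * The dup/commit/reserve sequence above is the standard XFS "rolling
 * transaction" idiom: duplicate the transaction (taking an extra log
 * ticket reference), commit the old half, then re-reserve log space in
 * the new half so the permanent reservation carries over.  Stripped to
 * its core, with error handling elided, the pattern is:
 *
 *      ntp = xfs_trans_dup(*tp);
 *      error = xfs_trans_commit(*tp, 0);
 *      *tp = ntp;
 *      xfs_log_ticket_put(ntp->t_ticket);
 *      error = xfs_trans_reserve(ntp, &tres, 0, 0);
 */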

int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        xfs_alloctype_t         atype = 0;      /* type for allocation routines */
        int                     error;          /* error return value */
        xfs_mount_t             *mp;            /* mount point structure */
        xfs_extlen_t            prod = 0;       /* product factor for allocators */
        xfs_extlen_t            ralen = 0;      /* realtime allocation length */
        xfs_extlen_t            align;          /* minimum allocation alignment */
        xfs_rtblock_t           rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        if (do_mod(ap->offset, align) || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

        /*
         * Lock out other modifications to the RT bitmap inode.
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t uninitialized_var(rtx);   /* realtime extent no */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        atype = ap->blkno == 0 ? XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                                &ralen, atype, ap->wasdel, prod, &rtb)))
                return error;
        if (rtb == NULLFSBLOCK && prod > 1 &&
            (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
                                           ap->length, &ralen, atype,
                                           ap->wasdel, 1, &rtb)))
                return error;
        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_d.di_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also.  This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
        } else {
                ap->length = 0;
        }
        return 0;
}
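
/*
 * Unit discipline in xfs_bmap_rtalloc() above: ap->blkno and ap->length
 * enter in filesystem blocks, but the realtime allocator works in
 * realtime extents (units of sb_rextsize blocks), hence the do_div() on
 * the way in and the multiplications by sb_rextsize on the way out.
 */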

/*
 * Stack switching interfaces for allocation
 */
static void
xfs_bmapi_allocate_worker(
        struct work_struct      *work)
{
        struct xfs_bmalloca     *args = container_of(work,
                                                struct xfs_bmalloca, work);
        unsigned long           pflags;

        /* we are in a transaction context here */
        current_set_flags_nested(&pflags, PF_FSTRANS);

        args->result = __xfs_bmapi_allocate(args);
        complete(args->done);

        current_restore_flags_nested(&pflags, PF_FSTRANS);
}

/*
 * Allocation requests often come in with little stack to work on.  Push
 * them off to a worker thread so there is lots of stack to use.  Otherwise
 * just call directly to avoid the context switch overhead here.
 */
int
xfs_bmapi_allocate(
        struct xfs_bmalloca     *args)
{
        DECLARE_COMPLETION_ONSTACK(done);

        if (!args->stack_switch)
                return __xfs_bmapi_allocate(args);

        args->done = &done;
        INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
        queue_work(xfs_alloc_wq, &args->work);
        wait_for_completion(&done);
        return args->result;
}
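
/*
 * Callers that may arrive with little stack left (e.g. in the middle of
 * writeback) set args->stack_switch so the allocation runs on
 * xfs_alloc_wq with a fresh worker stack.  The on-stack completion keeps
 * the call synchronous, and the worker sets PF_FSTRANS because it is
 * still logically inside the caller's transaction context.
 */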

/*
 * Check if the endoff is outside the last extent.  If so the caller will
 * grow the allocation to a stripe unit boundary.  All offsets are considered
 * outside the end of file for an empty fork, so 1 is returned in *eof in
 * that case.
 */
int
xfs_bmap_eof(
        struct xfs_inode        *ip,
        xfs_fileoff_t           endoff,
        int                     whichfork,
        int                     *eof)
{
        struct xfs_bmbt_irec    rec;
        int                     error;

        error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
        if (error || *eof)
                return error;

        *eof = endoff >= rec.br_startoff + rec.br_blockcount;
        return 0;
}

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
        xfs_ifork_t             *ifp,
        xfs_extnum_t            idx,
        int                     numrecs,
        int                     *count)
{
        int             b;

        for (b = 0; b < numrecs; b++) {
                xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
                *count += xfs_bmbt_get_blockcount(frp);
        }
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
        struct xfs_mount        *mp,
        struct xfs_btree_block  *block,
        int                     numrecs,
        int                     *count)
{
        int             b;
        xfs_bmbt_rec_t  *frp;

        for (b = 1; b <= numrecs; b++) {
                frp = XFS_BMBT_REC_ADDR(mp, block, b);
                *count += xfs_bmbt_disk_get_blockcount(frp);
        }
}
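
/*
 * Note the indexing difference between the two leaf counters above:
 * incore extent records are addressed 0-based through xfs_iext_get_ext(),
 * while on-disk btree records addressed through XFS_BMBT_REC_ADDR are
 * 1-based, which is why the second loop runs from 1 to numrecs inclusive.
 */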

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int                                      /* error */
xfs_bmap_count_tree(
        xfs_mount_t     *mp,            /* file system mount point */
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ifork_t     *ifp,           /* inode fork pointer */
        xfs_fsblock_t   blockno,        /* file system block number */
        int             levelin,        /* level in btree */
        int             *count)         /* Count of blocks */
{
        int                     error;
        xfs_buf_t               *bp, *nbp;
        int                     level = levelin;
        __be64                  *pp;
        xfs_fsblock_t           bno = blockno;
        xfs_fsblock_t           nextbno;
        struct xfs_btree_block  *block, *nextblock;
        int                     numrecs;

        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
        if (error)
                return error;
        *count += 1;
        block = XFS_BUF_TO_BLOCK(bp);

        if (--level) {
                /* Not at node above leaves, count this level of nodes */
                nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                while (nextbno != NULLFSBLOCK) {
                        error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        nextblock = XFS_BUF_TO_BLOCK(nbp);
                        nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
                        xfs_trans_brelse(tp, nbp);
                }

                /* Dive to the next level */
                pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
                bno = be64_to_cpu(*pp);
                if (unlikely((error =
                     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
                        xfs_trans_brelse(tp, bp);
                        XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
                                         XFS_ERRLEVEL_LOW, mp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                xfs_trans_brelse(tp, bp);
        } else {
                /* count all level 1 nodes and their leaves */
                for (;;) {
                        nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
                        numrecs = be16_to_cpu(block->bb_numrecs);
                        xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
                        xfs_trans_brelse(tp, bp);
                        if (nextbno == NULLFSBLOCK)
                                break;
                        bno = nextbno;
                        error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
                                                XFS_BMAP_BTREE_REF,
                                                &xfs_bmbt_buf_ops);
                        if (error)
                                return error;
                        *count += 1;
                        block = XFS_BUF_TO_BLOCK(bp);
                }
        }
        return 0;
}
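
/*
 * Walk shape of xfs_bmap_count_tree(): at each interior level, count the
 * current block plus every right sibling, then recurse through the
 * leftmost pointer only; at the lowest level, walk the sibling chain and
 * let xfs_bmap_disk_count_leaves() add in the blocks mapped by each
 * record.  *count therefore accumulates both btree blocks and mapped
 * data blocks.
 */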

/*
 * Count fsblocks of the given fork.
 */
int                                             /* error */
xfs_bmap_count_blocks(
        xfs_trans_t             *tp,            /* transaction pointer */
        xfs_inode_t             *ip,            /* incore inode */
        int                     whichfork,      /* data or attr fork */
        int                     *count)         /* out: count of blocks */
{
        struct xfs_btree_block  *block; /* current btree block */
        xfs_fsblock_t           bno;    /* block # of "block" */
        xfs_ifork_t             *ifp;   /* fork structure */
        int                     level;  /* btree level, for checking */
        xfs_mount_t             *mp;    /* file system mount structure */
        __be64                  *pp;    /* pointer to block address */

        bno = NULLFSBLOCK;
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
                xfs_bmap_count_leaves(ifp, 0,
                        ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
                        count);
                return 0;
        }

        /*
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        block = ifp->if_broot;
        level = be16_to_cpu(block->bb_level);
        ASSERT(level > 0);
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
        ASSERT(bno != NULLDFSBNO);
        ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
        ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

        if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
                XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
                                 mp);
                return XFS_ERROR(EFSCORRUPTED);
        }

        return 0;
}

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
        xfs_inode_t             *ip,            /* xfs incore inode pointer */
        struct getbmapx         *out,           /* output structure */
        int                     prealloced,     /* this is a file with
                                                 * preallocated data space */
        __int64_t               end,            /* last block requested */
        xfs_fsblock_t           startblock)
{
        __int64_t               fixlen;
        xfs_mount_t             *mp;            /* file system mount point */
        xfs_ifork_t             *ifp;           /* inode fork pointer */
        xfs_extnum_t            lastx;          /* last extent pointer */
        xfs_fileoff_t           fileblock;

        if (startblock == HOLESTARTBLOCK) {
                mp = ip->i_mount;
                out->bmv_block = -1;
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                fixlen -= out->bmv_offset;
                if (prealloced && out->bmv_offset + out->bmv_length == end) {
                        /* Came to hole at EOF.  Trim it. */
                        if (fixlen <= 0)
                                return 0;
                        out->bmv_length = fixlen;
                }
        } else {
                if (startblock == DELAYSTARTBLOCK)
                        out->bmv_block = -2;
                else
                        out->bmv_block = xfs_fsb_to_db(ip, startblock);
                fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
                ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
                if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
                   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
                        out->bmv_oflags |= BMV_OF_LAST;
        }

        return 1;
}
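
/*
 * The bmv_block sentinels set above follow the getbmap ABI: -1 marks a
 * hole, -2 a delalloc extent, and anything else is a disk address.  All
 * of bmv_block, bmv_offset and bmv_length are expressed in 512-byte
 * basic blocks.
 */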

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int                                             /* error code */
xfs_getbmap(
        xfs_inode_t             *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        xfs_bmap_format_t       formatter,      /* format to user */
        void                    *arg)           /* formatter arg */
{
        __int64_t               bmvend;         /* last block requested */
        int                     error = 0;      /* return value */
        __int64_t               fixlen;         /* length for -1 case */
        int                     i;              /* extent number */
        int                     lock;           /* lock state */
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
        int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
        int                     whichfork;      /* data or attr fork */
        int                     prealloced;     /* this is a file with
                                                 * preallocated data space */
        int                     iflags;         /* interface flags */
        int                     bmapi_flags;    /* flags for xfs_bmapi */
        int                     cur_ext = 0;

        mp = ip->i_mount;
        iflags = bmv->bmv_iflags;
        whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

        if (whichfork == XFS_ATTR_FORK) {
                if (XFS_IFORK_Q(ip)) {
                        if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
                            ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
                                return XFS_ERROR(EINVAL);
                } else if (unlikely(
                           ip->i_d.di_aformat != 0 &&
                           ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
                        XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
                                         ip->i_mount);
                        return XFS_ERROR(EFSCORRUPTED);
                }

                prealloced = 0;
                fixlen = 1LL << 32;
        } else {
                if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
                        return XFS_ERROR(EINVAL);

                if (xfs_get_extsz_hint(ip) ||
                    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
                        prealloced = 1;
                        fixlen = mp->m_super->s_maxbytes;
                } else {
                        prealloced = 0;
                        fixlen = XFS_ISIZE(ip);
                }
        }

        if (bmv->bmv_length == -1) {
                fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
                bmv->bmv_length =
                        max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
        } else if (bmv->bmv_length == 0) {
                bmv->bmv_entries = 0;
                return 0;
        } else if (bmv->bmv_length < 0) {
                return XFS_ERROR(EINVAL);
        }

        nex = bmv->bmv_count - 1;
        if (nex <= 0)
                return XFS_ERROR(EINVAL);
        bmvend = bmv->bmv_offset + bmv->bmv_length;

        if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
                return XFS_ERROR(ENOMEM);
        out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
        if (!out)
                return XFS_ERROR(ENOMEM);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
                if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
                        error = -filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;
                }
                /*
                 * even after flushing the inode, there can still be delalloc
                 * blocks on the inode beyond EOF due to speculative
                 * preallocation.  These are not removed until the release
                 * function is called or the inode is inactivated.  Hence we
                 * cannot assert here that ip->i_delayed_blks == 0.
                 */
        }

        lock = xfs_ilock_map_shared(ip);

        /*
         * Don't let nex be bigger than the number of extents
         * we can have assuming alternating holes and real extents.
         */
        if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
                nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

        bmapi_flags = xfs_bmapi_aflag(whichfork);
        if (!(iflags & BMV_IF_PREALLOC))
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        /*
         * Allocate enough space to handle "subnex" maps at a time.
         */
        error = ENOMEM;
        subnex = 16;
        map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
        if (!map)
                goto out_unlock_ilock;

        bmv->bmv_entries = 0;

        if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
            (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
                error = 0;
                goto out_free_map;
        }

        nexleft = nex;

        do {
                nmap = (nexleft > subnex) ? subnex : nexleft;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
                if (error)
                        goto out_free_map;
                ASSERT(nmap <= subnex);

                for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
                        else if (map[i].br_startblock == DELAYSTARTBLOCK)
                                out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
                        out[cur_ext].bmv_offset =
                                XFS_FSB_TO_BB(mp, map[i].br_startoff);
                        out[cur_ext].bmv_length =
                                XFS_FSB_TO_BB(mp, map[i].br_blockcount);
                        out[cur_ext].bmv_unused1 = 0;
                        out[cur_ext].bmv_unused2 = 0;

                        /*
                         * delayed allocation extents that start beyond EOF can
                         * occur due to speculative EOF allocation when the
                         * delalloc extent is larger than the largest freespace
                         * extent at conversion time.  These extents cannot be
                         * converted by data writeback, so can exist here even
                         * if we are not supposed to be finding delalloc
                         * extents.
                         */
                        if (map[i].br_startblock == DELAYSTARTBLOCK &&
                            map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
                                ASSERT((iflags & BMV_IF_DELALLOC) != 0);

                        if (map[i].br_startblock == HOLESTARTBLOCK &&
                            whichfork == XFS_ATTR_FORK) {
                                /* came to the end of attribute fork */
                                out[cur_ext].bmv_oflags |= BMV_OF_LAST;
                                goto out_free_map;
                        }

                        if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
                                        prealloced, bmvend,
                                        map[i].br_startblock))
                                goto out_free_map;

                        bmv->bmv_offset =
                                out[cur_ext].bmv_offset +
                                out[cur_ext].bmv_length;
                        bmv->bmv_length =
                                max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

                        /*
                         * In case we don't want to return the hole,
                         * don't increase cur_ext so that we can reuse
                         * it in the next loop.
                         */
                        if ((iflags & BMV_IF_NO_HOLES) &&
                            map[i].br_startblock == HOLESTARTBLOCK) {
                                memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
                                continue;
                        }

                        nexleft--;
                        bmv->bmv_entries++;
                        cur_ext++;
                }
        } while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
        kmem_free(map);
 out_unlock_ilock:
        xfs_iunlock_map_shared(ip, lock);
 out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        for (i = 0; i < cur_ext; i++) {
                int full = 0;   /* user array is full */

                /* format results & advance arg */
                error = formatter(&arg, &out[i], &full);
                if (error || full)
                        break;
        }

        kmem_free(out);
        return error;
}
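
/*
 * A note on "nex = bmv->bmv_count - 1" above: in the getbmap ioctl
 * convention the first struct getbmapx in the user's array describes the
 * request itself, so only bmv_count - 1 slots should be filled with
 * extent records.
 */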

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  Walks a block at a time so will be slow, but is only executed
 * in rare error cases so the overhead is not critical.  This will always
 * punch out both the start and end blocks, even if the ranges only partially
 * overlap them, so it is up to the caller to ensure that partial blocks are
 * not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        xfs_fileoff_t           remaining = length;
        int                     error = 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        do {
                int             done;
                xfs_bmbt_irec_t imap;
                int             nimaps = 1;
                xfs_fsblock_t   firstblock;
                xfs_bmap_free_t flist;

                /*
                 * Map the range first and check that it is a delalloc extent
                 * before trying to unmap the range.  Otherwise we will be
                 * trying to remove a real extent (which requires a
                 * transaction) or a hole, which is probably a bad idea...
                 */
                error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
                                       XFS_BMAPI_ENTIRE);

                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "Failed delalloc mapping lookup ino %lld fsb %lld.",
                                                ip->i_ino, start_fsb);
                        }
                        break;
                }
                if (!nimaps) {
                        /* nothing there */
                        goto next_block;
                }
                if (imap.br_startblock != DELAYSTARTBLOCK) {
                        /* been converted, ignore */
                        goto next_block;
                }
                WARN_ON(imap.br_blockcount == 0);

                /*
                 * Note: while we initialise the firstblock/flist pair, they
                 * should never be used because blocks should never be
                 * allocated or freed for a delalloc extent and hence we don't
                 * need to cancel or finish them after the xfs_bunmapi() call.
                 */
                xfs_bmap_init(&flist, &firstblock);
                error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
                                        &flist, &done);
                if (error)
                        break;

                ASSERT(!flist.xbf_count && !flist.xbf_first);
next_block:
                start_fsb++;
                remaining--;
        } while (remaining > 0);

        return error;
}
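
/*
 * The expected callers of this are rare error paths - for example, a
 * failed delalloc conversion during writeback - where the delalloc
 * accounting for a small range must be unwound.  That is why the
 * block-at-a-time walk is tolerable here.
 */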

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.  The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(ip->i_d.di_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VN_CACHED(VFS_I(ip)) == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        return true;
}

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,
        bool            need_iolock)
{
        xfs_trans_t     *tp;
        int             error;
        xfs_fileoff_t   end_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_filblks_t   map_len;
        int             nimaps;
        xfs_bmbt_irec_t imap;

        /*
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return 0;
        map_len = last_fsb - end_fsb;

        nimaps = 1;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!error && (nimaps != 0) &&
            (imap.br_startblock != HOLESTARTBLOCK ||
             ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
                error = xfs_qm_dqattach(ip, 0);
                if (error)
                        return error;

                /*
                 * There are blocks after the end of file.
                 * Free them up now by truncating the file to
                 * its current size.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);

                if (need_iolock) {
                        if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
                                xfs_trans_cancel(tp, 0);
                                return EAGAIN;
                        }
                }

                error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
                if (error) {
                        ASSERT(XFS_FORCED_SHUTDOWN(mp));
                        xfs_trans_cancel(tp, 0);
                        if (need_iolock)
                                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                        return error;
                }

                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * Do not update the on-disk file size.  If we update the
                 * on-disk file size and then the system crashes before the
                 * contents of the file are flushed to disk then the files
                 * may be full of holes (i.e. the NULL files bug).
                 */
                error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
                                              XFS_ISIZE(ip));
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
                         * bother truncating the file.
                         */
                        xfs_trans_cancel(tp,
                                         (XFS_TRANS_RELEASE_LOG_RES |
                                          XFS_TRANS_ABORT));
                } else {
                        error = xfs_trans_commit(tp,
                                                XFS_TRANS_RELEASE_LOG_RES);
                        if (!error)
                                xfs_inode_clear_eofblocks_tag(ip);
                }

                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (need_iolock)
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        }
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fsblock_t           firstfsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        xfs_bmap_free_t         free_list;
        uint                    qblocks, resblks, resrtextents;
        int                     committed;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        if (len <= 0)
                return XFS_ERROR(EINVAL);

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        if ((temp = do_mod(startoffset_fsb, extsz)))
                                e += temp;
                        if ((temp = do_mod(e, extsz)))
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow.  We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
                if (unlikely(rt)) {
                        resrtextents = qblocks = resblks;
                        resrtextents /= mp->m_sb.sb_rextsize;
                        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
                        resrtextents = 0;
                        resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        quota_flag = XFS_QMOPT_RES_REGBLKS;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
                error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
                                          resblks, resrtextents);
                /*
                 * Check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        xfs_trans_cancel(tp, 0);
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
                                                      0, quota_flag);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                xfs_bmap_init(&free_list, &firstfsb);
                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, &firstfsb,
                                        0, imapp, &nimaps, &free_list);
                if (error) {
                        goto error0;
                }

                /*
                 * Complete the transaction
                 */
                error = xfs_bmap_finish(&tp, &free_list, &committed);
                if (error) {
                        goto error0;
                }

                error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error) {
                        break;
                }

                allocated_fsb = imapp->br_blockcount;

                if (nimaps == 0) {
                        error = XFS_ERROR(ENOSPC);
                        break;
                }

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
        xfs_bmap_cancel(&free_list);
        xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1: /* Just cancel transaction */
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Zero file bytes between startoff and endoff inclusive.
 * The iolock is held exclusive and no blocks are buffered.
 *
 * This function is used by xfs_free_file_space() to zero
 * partial blocks when the range to free is not block aligned.
 * When unreserving space with boundaries that are not block
 * aligned we round up the start and round down the end
 * boundaries and then use this function to zero the parts of
 * the blocks that got dropped during the rounding.
 */
STATIC int
xfs_zero_remaining_bytes(
        xfs_inode_t             *ip,
        xfs_off_t               startoff,
        xfs_off_t               endoff)
{
        xfs_bmbt_irec_t         imap;
        xfs_fileoff_t           offset_fsb;
        xfs_off_t               lastoffset;
        xfs_off_t               offset;
        xfs_buf_t               *bp;
        xfs_mount_t             *mp = ip->i_mount;
        int                     nimap;
        int                     error = 0;

        /*
         * Avoid doing I/O beyond eof - it's not necessary
         * since nothing can read beyond eof.  The space will
         * be zeroed when the file is extended anyway.
         */
        if (startoff >= XFS_ISIZE(ip))
                return 0;

        if (endoff > XFS_ISIZE(ip))
                endoff = XFS_ISIZE(ip);

        bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp,
                                  BTOBB(mp->m_sb.sb_blocksize), 0);
        if (!bp)
                return XFS_ERROR(ENOMEM);

        xfs_buf_unlock(bp);

        for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
                offset_fsb = XFS_B_TO_FSBT(mp, offset);
                nimap = 1;
                error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
                if (error || nimap < 1)
                        break;
                ASSERT(imap.br_blockcount >= 1);
                ASSERT(imap.br_startoff == offset_fsb);
                lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
                if (lastoffset > endoff)
                        lastoffset = endoff;
                if (imap.br_startblock == HOLESTARTBLOCK)
                        continue;
                ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                if (imap.br_state == XFS_EXT_UNWRITTEN)
                        continue;
                XFS_BUF_UNDONE(bp);
                XFS_BUF_UNWRITE(bp);
                XFS_BUF_READ(bp);
                XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
                xfsbdstrat(mp, bp);
                error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp,
                                        "xfs_zero_remaining_bytes(read)");
                        break;
                }
                memset(bp->b_addr +
                        (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
                      0, lastoffset - offset + 1);
                XFS_BUF_UNDONE(bp);
                XFS_BUF_UNREAD(bp);
                XFS_BUF_WRITE(bp);
                xfsbdstrat(mp, bp);
                error = xfs_buf_iowait(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp,
                                        "xfs_zero_remaining_bytes(write)");
                        break;
                }
        }
        xfs_buf_free(bp);
        return error;
}
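
/*
 * The loop above is a straight read-modify-write of one filesystem block
 * at a time through an uncached buffer: holes and unwritten extents are
 * skipped because they already read back as zeroes, so only written
 * blocks are read in, partially zeroed in memory and written back out.
 */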

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        int                     committed;
        int                     done;
        xfs_fileoff_t           endoffset_fsb;
        int                     error;
        xfs_fsblock_t           firstfsb;
        xfs_bmap_free_t         free_list;
        xfs_bmbt_irec_t         imap;
        xfs_off_t               ioffset;
        xfs_extlen_t            mod = 0;
        xfs_mount_t             *mp;
        int                     nimap;
        uint                    resblks;
        xfs_off_t               rounding;
        int                     rt;
        xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;

        mp = ip->i_mount;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip, 0);
        if (error)
                return error;

        error = 0;
        if (len <= 0)   /* if nothing being freed */
                return error;
        rt = XFS_IS_REALTIME_INODE(ip);
        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /* wait for the completion of any pending DIOs */
        inode_dio_wait(VFS_I(ip));

        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
        ioffset = offset & ~(rounding - 1);
        error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                              ioffset, -1);
        if (error)
                goto out;
        truncate_pagecache_range(VFS_I(ip), ioffset, -1);

        /*
         * Need to zero the stuff we're not freeing, on disk.
         * If it's a realtime file & can't use unwritten extents then we
         * actually need to zero the extent edges.  Otherwise xfs_bunmapi
         * will take care of it for us.
         */
        if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
                nimap = 1;
                error = xfs_bmapi_read(ip, startoffset_fsb, 1,
                                        &imap, &nimap, 0);
                if (error)
                        goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        xfs_daddr_t     block;

                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                        block = imap.br_startblock;
                        mod = do_div(block, mp->m_sb.sb_rextsize);
                        if (mod)
                                startoffset_fsb += mp->m_sb.sb_rextsize - mod;
                }
                nimap = 1;
                error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
                                        &imap, &nimap, 0);
                if (error)
                        goto out;
                ASSERT(nimap == 0 || nimap == 1);
                if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
                        ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
                        mod++;
                        if (mod && (mod != mp->m_sb.sb_rextsize))
                                endoffset_fsb -= mod;
                }
        }
        if ((done = (endoffset_fsb <= startoffset_fsb)))
                /*
                 * One contiguous piece to clear
                 */
                error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
        else {
                /*
                 * Some full blocks, possibly two pieces to clear
                 */
                if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
                        error = xfs_zero_remaining_bytes(ip, offset,
                                XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
                if (!error &&
                    XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
                        error = xfs_zero_remaining_bytes(ip,
                                XFS_FSB_TO_B(mp, endoffset_fsb),
                                offset + len - 1);
        }

        /*
         * free file space until done or until there is an error
         */
        resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        while (!error && !done) {

                /*
                 * allocate and setup the transaction.  Allow this
                 * transaction to dip into the reserve blocks to ensure
                 * the freeing of the space succeeds at ENOSPC.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
                tp->t_flags |= XFS_TRANS_RESERVE;
                error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);

                /*
                 * check for running out of space
                 */
                if (error) {
                        /*
                         * Free the transaction structure.
                         */
                        ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
                        xfs_trans_cancel(tp, 0);
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = xfs_trans_reserve_quota(tp, mp,
                                ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
                                resblks, 0, XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto error1;

                xfs_trans_ijoin(tp, ip, 0);

                /*
                 * issue the bunmapi() call to free the blocks
                 */
                xfs_bmap_init(&free_list, &firstfsb);
                error = xfs_bunmapi(tp, ip, startoffset_fsb,
                                  endoffset_fsb - startoffset_fsb,
                                  0, 2, &firstfsb, &free_list, &done);
                if (error) {
                        goto error0;
                }

                /*
                 * complete the transaction
                 */
                error = xfs_bmap_finish(&tp, &free_list, &committed);
                if (error) {
                        goto error0;
                }

                error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
        }

 out:
        return error;

 error0:
        xfs_bmap_cancel(&free_list);
 error1:
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        goto out;
}

int
xfs_zero_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        uint                    granularity;
        xfs_off_t               start_boundary;
        xfs_off_t               end_boundary;
        int                     error;

        granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);

        /*
         * Round the range of extents we are going to convert inwards.  If the
         * offset is aligned, then it doesn't get changed so we zero from the
         * start of the block offset points to.
         */
        start_boundary = round_up(offset, granularity);
        end_boundary = round_down(offset + len, granularity);

        ASSERT(start_boundary >= offset);
        ASSERT(end_boundary <= offset + len);

        if (start_boundary < end_boundary - 1) {
                /* punch out the page cache over the conversion range */
                truncate_pagecache_range(VFS_I(ip), start_boundary,
                                         end_boundary - 1);
                /* convert the blocks */
                error = xfs_alloc_file_space(ip, start_boundary,
                                        end_boundary - start_boundary - 1,
                                        XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
                if (error)
                        goto out;

                /* We've handled the interior of the range, now for the edges */
                if (start_boundary != offset) {
                        error = xfs_iozero(ip, offset, start_boundary - offset);
                        if (error)
                                goto out;
                }

                if (end_boundary != offset + len)
                        error = xfs_iozero(ip, end_boundary,
                                           offset + len - end_boundary);

        } else {
                /*
                 * It's either a sub-granularity range, or the range lies
                 * only partially across two adjacent granularity-sized
                 * chunks, so there is no whole chunk to convert.
                 */
                error = xfs_iozero(ip, offset, len);
        }

out:
        return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap.  This is not a problem
 * with attr1 because of the fixed fork offset, but attr2 has a dynamically
 * sized data fork depending on the space the attribute fork is taking so we
 * can get invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format.  Hence we can't
 * just blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
        xfs_inode_t     *ip,    /* target inode */
        xfs_inode_t     *tip)   /* tmp inode */
{

        /* Should never get a local format */
        if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
            tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
                return EINVAL;

        /*
         * if the target inode has fewer extents than the temporary inode then
         * why did userspace call us?
         */
        if (ip->i_d.di_nextents < tip->i_d.di_nextents)
                return EINVAL;

        /*
         * if the target inode is in extent form and the temp inode is in btree
         * form then we will end up with the target inode in the wrong format
         * as we already know there are fewer extents in the temp inode.
         */
        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
            tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
                return EINVAL;

        /* Check temp in extent form to max in target */
        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
                        XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                return EINVAL;

        /* Check target in extent form to max in temp */
        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
                        XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                return EINVAL;

        /*
         * If we are in a btree format, check that the temp root block will fit
         * in the target and that it has enough extents to be in btree format
         * in the target.
         *
         * Note that we have to be careful to allow btree->extent conversions
         * (a common defrag case) which will occur when the temp inode is in
         * extent format...
         */
        if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_BOFF(ip) &&
                    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
                        return EINVAL;
                if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
                    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                        return EINVAL;
        }

        /* Reciprocal target->temp btree format checks */
        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_BOFF(tip) &&
                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
                        return EINVAL;
                if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
                    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                        return EINVAL;
        }

        return 0;
}
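
/*
 * Worked example of the asymmetry guarded against above: if the target's
 * fork offset leaves room for 7 inline extents but the temp inode's only
 * leaves room for 6, defragmenting down to 7 extents leaves the temp data
 * fork in btree format; swapping it into the target would strand a btree
 * root where extent format is required.  Hence the checks run in both
 * directions and simply reject the swap.
 */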

int
xfs_swap_extents(
        xfs_inode_t     *ip,    /* target inode */
        xfs_inode_t     *tip,   /* tmp inode */
        xfs_swapext_t   *sxp)
{
        xfs_mount_t     *mp = ip->i_mount;
        xfs_trans_t     *tp;
        xfs_bstat_t     *sbp = &sxp->sx_stat;
        xfs_ifork_t     *tempifp, *ifp, *tifp;
        int             src_log_flags, target_log_flags;
        int             error = 0;
        int             aforkblks = 0;
        int             taforkblks = 0;
        __uint64_t      tmp;

        tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
        if (!tempifp) {
                error = XFS_ERROR(ENOMEM);
                goto out;
        }

        /*
         * we have to do two separate lock calls here to keep lockdep
         * happy.  If we try to get all the locks in one call, lockdep will
         * report false positives when we drop the ILOCK and regain them
         * below.
         */
        xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
        xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

        /* Verify that both files have the same format */
        if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
                error = XFS_ERROR(EINVAL);
                goto out_unlock;
        }

        /* Verify both files are either real-time or non-realtime */
        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
                error = XFS_ERROR(EINVAL);
                goto out_unlock;
        }

        error = -filemap_write_and_wait(VFS_I(tip)->i_mapping);
        if (error)
                goto out_unlock;
        truncate_pagecache_range(VFS_I(tip), 0, -1);

        /* Verify O_DIRECT for ftmp */
        if (VN_CACHED(VFS_I(tip)) != 0) {
                error = XFS_ERROR(EINVAL);
                goto out_unlock;
        }

        /* Verify all data are being swapped */
        if (sxp->sx_offset != 0 ||
            sxp->sx_length != ip->i_d.di_size ||
            sxp->sx_length != tip->i_d.di_size) {
                error = XFS_ERROR(EFAULT);
                goto out_unlock;
        }

        trace_xfs_swap_extent_before(ip, 0);
        trace_xfs_swap_extent_before(tip, 1);

        /* check inode formats now that data is flushed */
        error = xfs_swap_extents_check_format(ip, tip);
        if (error) {
                xfs_notice(mp,
                    "%s: inode 0x%llx format is incompatible for exchanging.",
                                __func__, ip->i_ino);
                goto out_unlock;
        }

        /*
         * Compare the current change & modify times with that
         * passed in.  If they differ, we abort this swap.
         * This is the mechanism used to ensure the calling
         * process that the file was not changed out from
         * under it.
         */
        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
                error = XFS_ERROR(EBUSY);
                goto out_unlock;
        }

        /* We need to fail if the file is memory mapped.  Once we have tossed
         * all existing pages, the page fault will have no option
         * but to go to the filesystem for pages.  By making the page fault call
         * vop_read (or write in the case of autogrow) they block on the iolock
         * until we have switched the extents.
         */
        if (VN_MAPPED(VFS_I(ip))) {
                error = XFS_ERROR(EBUSY);
                goto out_unlock;
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_iunlock(tip, XFS_ILOCK_EXCL);

        /*
         * There is a race condition here since we gave up the
         * ilock.  However, the data fork will not change since
         * we have the iolock (locked for truncation too) so we
         * are safe.  We don't really care if non-io related
         * fields change.
         */
        truncate_pagecache_range(VFS_I(ip), 0, -1);

        tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
        if (error) {
                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                xfs_iunlock(tip, XFS_IOLOCK_EXCL);
                xfs_trans_cancel(tp, 0);
                goto out;
        }
        xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);

        /*
         * Count the number of extended attribute blocks
         */
        if ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0) &&
            (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
                if (error)
                        goto out_trans_cancel;
        }
        if ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0) &&
            (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
                                              &taforkblks);
                if (error)
                        goto out_trans_cancel;
        }

        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

        /*
         * Before we've swapped the forks, let's set the owners of the forks
         * appropriately.  We have to do this as we are demand paging the btree
         * buffers, and so the validation done on read will expect the owner
         * field to be correctly set.  Once we change the owners, we can swap
         * the inode forks.
         *
         * Note the trickiness in setting the log flags - we set the owner log
         * flag on the opposite inode (i.e. the inode we are setting the new
         * owner to be) because once we swap the forks and log that, log
         * recovery is going to see the fork as owned by the swapped inode,
         * not the pre-swapped inodes.
         */
        src_log_flags = XFS_ILOG_CORE;
        target_log_flags = XFS_ILOG_CORE;
        if (ip->i_d.di_version == 3 &&
            ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                target_log_flags |= XFS_ILOG_DOWNER;
                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
                                              tip->i_ino, NULL);
                if (error)
                        goto out_trans_cancel;
        }

        if (tip->i_d.di_version == 3 &&
            tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                src_log_flags |= XFS_ILOG_DOWNER;
                error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
                                              ip->i_ino, NULL);
                if (error)
                        goto out_trans_cancel;
        }

        /*
         * Swap the data forks of the inodes
         */
        ifp = &ip->i_df;
        tifp = &tip->i_df;
        *tempifp = *ifp;        /* struct copy */
        *ifp = *tifp;           /* struct copy */
        *tifp = *tempifp;       /* struct copy */

        /*
         * Fix the on-disk inode values
         */
        tmp = (__uint64_t)ip->i_d.di_nblocks;
        ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
        tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

        tmp = (__uint64_t) ip->i_d.di_nextents;
        ip->i_d.di_nextents = tip->i_d.di_nextents;
        tip->i_d.di_nextents = tmp;

        tmp = (__uint64_t) ip->i_d.di_format;
        ip->i_d.di_format = tip->i_d.di_format;
        tip->i_d.di_format = tmp;

        /*
         * The extents in the source inode could still contain speculative
         * preallocation beyond EOF (e.g. the file is open but not modified
         * while defrag is in progress).  In that case, we need to copy over
         * the number of delalloc blocks the data fork in the source inode is
         * tracking beyond EOF so that when the fork is truncated away when
         * the temporary inode is unlinked we don't underrun the
         * i_delayed_blks counter on that inode.
         */
        ASSERT(tip->i_delayed_blks == 0);
        tip->i_delayed_blks = ip->i_delayed_blks;
        ip->i_delayed_blks = 0;

        switch (ip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                /* If the extents fit in the inode, fix the
                 * pointer.  Otherwise it's already NULL or
                 * pointing to the extent.
                 */
                if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
                        ifp->if_u1.if_extents =
                                ifp->if_u2.if_inline_ext;
                }
                src_log_flags |= XFS_ILOG_DEXT;
                break;
        case XFS_DINODE_FMT_BTREE:
                ASSERT(ip->i_d.di_version < 3 ||
                       (src_log_flags & XFS_ILOG_DOWNER));
                src_log_flags |= XFS_ILOG_DBROOT;
                break;
        }

        switch (tip->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
                /* If the extents fit in the inode, fix the
                 * pointer.  Otherwise it's already NULL or
                 * pointing to the extent.
                 */
                if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
                        tifp->if_u1.if_extents =
                                tifp->if_u2.if_inline_ext;
                }
                target_log_flags |= XFS_ILOG_DEXT;
                break;
        case XFS_DINODE_FMT_BTREE:
                target_log_flags |= XFS_ILOG_DBROOT;
                ASSERT(tip->i_d.di_version < 3 ||
                       (target_log_flags & XFS_ILOG_DOWNER));
                break;
        }

        xfs_trans_log_inode(tp, ip, src_log_flags);
        xfs_trans_log_inode(tp, tip, target_log_flags);

        /*
         * If this is a synchronous mount, make sure that the
         * transaction goes to disk before returning to the user.
         */
        if (mp->m_flags & XFS_MOUNT_WSYNC)
                xfs_trans_set_sync(tp);

        error = xfs_trans_commit(tp, 0);

        trace_xfs_swap_extent_after(ip, 0);
        trace_xfs_swap_extent_after(tip, 1);
out:
        kmem_free(tempifp);
        return error;

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
        goto out;

out_trans_cancel:
        xfs_trans_cancel(tp, 0);
        goto out_unlock;
}