/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2
STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
	xfs_ifork_t		*ifp,
	int			nrecs,
	xfs_exntfmt_t		fmt)
{
	xfs_bmbt_irec_t		irec;
	xfs_bmbt_rec_host_t	rec;
	int			i;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */
#ifdef DEBUG
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
STATIC void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
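	/*
	 * Worked example of the shift above (illustrative numbers only,
	 * not taken from this code): with an 8192-byte inode cluster and
	 * 256-byte inodes (sb_inodelog == 8), j = 8192 >> 8 = 32 inodes
	 * to check in this buffer.
	 */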
	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_fs_cmn_err(CE_ALERT, mp,
				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
				bp);
			ASSERT(dip->di_next_unlinked);
		}
	}
}
#endif
/*
 * Find the buffer associated with the given inode map
 * We do basic validation checks on the buffer once it has been
 * retrieved from disk.
 */
STATIC int
xfs_imap_to_bp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_imap_t	*imap,
	xfs_buf_t	**bpp,
	uint		buf_flags,
	uint		imap_flags)
{
	int		error;
	int		i;
	int		ni;
	xfs_buf_t	*bp;

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
	if (error) {
		if (error != EAGAIN) {
			cmn_err(CE_WARN,
				"xfs_imap_to_bp: xfs_trans_read_buf() returned "
				"an error %d on %s.  Returning error.",
				error, mp->m_fsname);
		} else {
			ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		}
		return error;
	}

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
#ifdef DEBUG
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	ni = 1;
#endif

	for (i = 0; i < ni; i++) {
		int		di_ok;
		xfs_dinode_t	*dip;

		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
			    XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (imap_flags & XFS_IMAP_BULKSTAT) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			}
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
			cmn_err(CE_PANIC,
					"Device %s - bad inode magic/vsn "
					"daddr %lld #%d (magic=%x)",
				XFS_BUFTARG_NAME(mp->m_ddev_targp),
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_core.di_magic));
#endif
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);
		}
	}

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);

	*bpp = bp;
	return 0;
}
/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
int
xfs_inotobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	int		*offset)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;

	imap.im_blkno = 0;
	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
	if (error)
		return error;

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0);
	if (error)
		return error;

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	*offset = imap.im_boffset;
	return 0;
}
/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dinode_t	**dipp,
	xfs_buf_t	**bpp,
	xfs_daddr_t	bno,
	uint		imap_flags,
	uint		buf_flags)
{
	xfs_imap_t	imap;
	xfs_buf_t	*bp;
	int		error;

	if (ip->i_blkno == (xfs_daddr_t)0) {
		error = xfs_imap(mp, tp, ip->i_ino, &imap,
					XFS_IMAP_LOOKUP | imap_flags);
		if (error)
			return error;

		/*
		 * Fill in the fields in the inode that will be used to
		 * map the inode to its buffer from now on.
		 */
		ip->i_blkno = imap.im_blkno;
		ip->i_len = imap.im_len;
		ip->i_boffset = imap.im_boffset;
	} else {
		/*
		 * We've already mapped the inode once, so just use the
		 * mapping that we saved the first time.
		 */
		imap.im_blkno = ip->i_blkno;
		imap.im_len = ip->i_len;
		imap.im_boffset = ip->i_boffset;
	}
	ASSERT(bno == 0 || bno == imap.im_blkno);

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags);
	if (error)
		return error;

	if (!bp) {
		ASSERT(buf_flags & XFS_BUF_TRYLOCK);
		ASSERT(tp == NULL);
		*bpp = NULL;
		return EAGAIN;
	}

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*bpp = bp;
	return 0;
}
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip)
{
	xfs_attr_shortform_t	*atp;
	int			size;
	int			error;
	xfs_fsize_t		di_size;

	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	error = 0;
	if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
		     be16_to_cpu(dip->di_core.di_anextents) >
		     be64_to_cpu(dip->di_core.di_nblocks))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_core.di_nextents) +
			      be16_to_cpu(dip->di_core.di_anextents)),
			(unsigned long long)
				be64_to_cpu(dip->di_core.di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
			dip->di_core.di_forkoff);
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
					      ip->i_mount, dip);
			return XFS_ERROR(EFSCORRUPTED);
		}
		ip->i_d.di_size = 0;
		ip->i_size = 0;
		ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_core.di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			di_size = be64_to_cpu(dip->di_core.di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
					"corrupt inode %Lu "
					"(bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
						     XFS_ERRLEVEL_LOW,
						     ip->i_mount, dip);
				return XFS_ERROR(EFSCORRUPTED);
			}

			size = (int)di_size;
			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
			break;
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
			break;
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			break;
		default:
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return XFS_ERROR(EFSCORRUPTED);
		}
		break;

	default:
		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	if (error) {
		return error;
	}
	if (!XFS_DFORK_Q(dip))
		return 0;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_core.di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);
		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		break;
	default:
		error = XFS_ERROR(EFSCORRUPTED);
		break;
	}
	if (error) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		ip->i_afp = NULL;
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
	}
	return error;
}
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork,
	int		size)
{
	xfs_ifork_t	*ifp;
	int		real_size;

	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu "
			"(bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	real_size = 0;
	if (size == 0)
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
	else {
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
	}
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
	if (size)
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
	return 0;
}
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
	xfs_inode_t	*ip,
	xfs_dinode_t	*dip,
	int		whichfork)
{
	xfs_bmbt_rec_t	*dp;
	xfs_ifork_t	*ifp;
	int		nex;
	int		size;
	int		i;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
				     ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_real_bytes = 0;
	if (nex == 0)
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	else
		xfs_iext_add(ifp, 0, nex);

	ifp->if_bytes = size;
	if (size) {
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
			ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
		}
		XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
		if (whichfork != XFS_DATA_FORK ||
			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
				if (unlikely(xfs_check_nostate_extents(
				    ifp, 0, nex))) {
					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
							 XFS_ERRLEVEL_LOW,
							 ip->i_mount);
					return XFS_ERROR(EFSCORRUPTED);
				}
	}
	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;
}
/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	int			whichfork)
{
	xfs_bmdr_block_t	*dfp;
	xfs_ifork_t		*ifp;
	int			nrecs;
	int			size;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

	/*
	 * blow out if -- fork has less extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
			"corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
		ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;

	return 0;
}
void
xfs_dinode_from_disk(
	xfs_icdinode_t		*to,
	xfs_dinode_core_t	*from)
{
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid = be16_to_cpu(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
}
void
xfs_dinode_to_disk(
	xfs_dinode_core_t	*to,
	xfs_icdinode_t		*from)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid = cpu_to_be16(from->di_projid);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
}
STATIC uint
_xfs_dic2xflags(
	__uint16_t		di_flags)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;
	}

	return flags;
}
uint
xfs_ip2xflags(
	xfs_inode_t		*ip)
{
	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}
uint
xfs_dic2xflags(
	xfs_dinode_t		*dip)
{
	xfs_dinode_core_t	*dic = &dip->di_core;

	return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}
/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno,
	uint		imap_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_inode_t	*ip;
	int		error;

	ASSERT(xfs_inode_zone != NULL);

	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
	ip->i_ino = ino;
	ip->i_mount = mp;
	atomic_set(&ip->i_iocount, 0);
	spin_lock_init(&ip->i_flags_lock);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 * If the inode number refers to a block outside the file system
	 * then xfs_itobp() will return NULL.  In this case we should
	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
	 * know that this is a new incore inode.
	 */
	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK);
	if (error) {
		kmem_zone_free(xfs_inode_zone, ip);
		return error;
	}
	/*
	 * Initialize inode's trace buffers.
	 * Do this before xfs_iformat in case it adds entries.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif
	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
		kmem_zone_free(xfs_inode_zone, ip);
		xfs_trans_brelse(tp, bp);
#ifdef DEBUG
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
				"dip->di_core.di_magic (0x%x) != "
				"XFS_DINODE_MAGIC (0x%x)",
				be16_to_cpu(dip->di_core.di_magic),
				XFS_DINODE_MAGIC);
#endif /* DEBUG */
		return XFS_ERROR(EINVAL);
	}
	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_core.di_mode) {
		xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
		error = xfs_iformat(ip, dip);
		if (error)  {
			kmem_zone_free(xfs_inode_zone, ip);
			xfs_trans_brelse(tp, bp);
#ifdef DEBUG
			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
					"xfs_iformat() returned error %d",
					error);
#endif /* DEBUG */
			return error;
		}
	} else {
->i_d
.di_magic
= be16_to_cpu(dip
->di_core
.di_magic
);
895 ip
->i_d
.di_version
= dip
->di_core
.di_version
;
896 ip
->i_d
.di_gen
= be32_to_cpu(dip
->di_core
.di_gen
);
897 ip
->i_d
.di_flushiter
= be16_to_cpu(dip
->di_core
.di_flushiter
);
899 * Make sure to pull in the mode here as well in
900 * case the inode is released without being used.
901 * This ensures that xfs_inactive() will see that
902 * the inode is already free and not try to mess
903 * with the uninitialized part of it.
907 * Initialize the per-fork minima and maxima for a new
908 * inode here. xfs_iformat will do it for old inodes.
910 ip
->i_df
.if_ext_max
=
911 XFS_IFORK_DSIZE(ip
) / (uint
)sizeof(xfs_bmbt_rec_t
);
914 INIT_LIST_HEAD(&ip
->i_reclaim
);
	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		ip->i_d.di_projid = 0;
	}
	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;
	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	XFS_BUF_SET_REF(bp, XFS_INO_REF);
	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
	xfs_trans_brelse(tp, bp);
	*ipp = ip;
	return 0;
}
/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	int		whichfork)
{
	int		error;
	xfs_ifork_t	*ifp;
	xfs_extnum_t	nextents;
	size_t		size;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);
	}
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	size = nextents * sizeof(xfs_bmbt_rec_t);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_lastex = NULLEXTNUM;
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
	if (error) {
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
		return error;
	}
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
	return 0;
}
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation. Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
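/*
 * Illustrative caller pattern only (a sketch of the retry protocol described
 * above, not code from this file):
 *
 *	error = xfs_ialloc(tp, dp, mode, nlink, rdev, cr, prid, okalloc,
 *			   &ialloc_context, &call_again, &ip);
 *	if (!error && call_again) {
 *		commit the current transaction, holding ialloc_context;
 *		start and reserve a new transaction;
 *		error = xfs_ialloc(tp, dp, mode, nlink, rdev, cr, prid,
 *				   okalloc, &ialloc_context, &call_again, &ip);
 *	}
 *
 * In this tree the caller-side half of this dance lives in xfs_dir_ialloc().
 */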
int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	mode_t		mode,
	xfs_nlink_t	nlink,
	xfs_dev_t	rdev,
	cred_t		*cr,
	xfs_prid_t	prid,
	int		okalloc,
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,
	xfs_inode_t	**ipp)
{
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (error != 0) {
		return error;
	}
	if (*call_again || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);
	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_trans_iget(tp->t_mountp, tp, ino,
				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error) {
		return error;
	}
	ASSERT(ip != NULL);

	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid(cr);
	ip->i_d.di_gid = current_fsgid(cr);
	ip->i_d.di_projid = prid;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
		ip->i_d.di_version = XFS_DINODE_VERSION_2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */
	}

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
		xfs_bump_ino_vers2(tp, ip);
&& XFS_INHERIT_GID(pip
)) {
1110 ip
->i_d
.di_gid
= pip
->i_d
.di_gid
;
1111 if ((pip
->i_d
.di_mode
& S_ISGID
) && (mode
& S_IFMT
) == S_IFDIR
) {
1112 ip
->i_d
.di_mode
|= S_ISGID
;
	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;
	}
	ip->i_d.di_size = 0;
	ip->i_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);
	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
		if (pip && xfs_inode_is_filestream(pip)) {
			error = xfs_filestream_associate(pip, ip);
			if (error < 0)
				return -error;
			if (!error)
				xfs_iflags_set(ip, XFS_IFILESTREAM);
		}
		/* fall through */
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			uint	di_flags = 0;

			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
				}
			}
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
			    xfs_inherit_nodump)
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
			    xfs_inherit_sync)
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;
		}
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;
		break;
	default:
		ASSERT(0);
	}
	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
#ifdef DEBUG
/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
STATIC void
xfs_isize_check(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	map_first;
	int		nimaps;
	xfs_bmbt_irec_t	imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
		return;

	if (XFS_IS_REALTIME_INODE(ip))
		return;

	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
		return;

	nimaps = 2;
	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 * an error.
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			 (XFS_B_TO_FSB(mp,
				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			  map_first),
			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
			 NULL, NULL))
	    return;
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#else	/* DEBUG */
#define xfs_isize_check(mp, ip, isize)
#endif	/* DEBUG */
/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
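/*
 * Illustrative overflow case (example numbers only, not from this code):
 * with i_size near the 2^63 - 1 limit, converting last_block back to bytes
 * or adding the 1 << m_writeio_log write-behind window can wrap xfs_fsize_t
 * negative, which is why both last_byte computations below fall back to
 * XFS_MAXIOFFSET(mp) when the result goes negative.
 */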
xfs_fsize_t
xfs_file_last_byte(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_fsize_t	last_byte;
	xfs_fileoff_t	last_block;
	xfs_fileoff_t	size_last_block;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));

	mp = ip->i_mount;
	/*
	 * Only check for blocks beyond the EOF if the extents have
	 * been read in.  This eliminates the need for the inode lock,
	 * and it also saves us from looking when it really isn't
	 * necessary.
	 */
	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
		error = xfs_bmap_last_offset(NULL, ip, &last_block,
			XFS_DATA_FORK);
		if (error) {
			last_block = 0;
		}
	} else {
		last_block = 0;
	}
	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

	last_byte = XFS_FSB_TO_B(mp, last_block);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	last_byte += (1 << mp->m_writeio_log);
	if (last_byte < 0) {
		return XFS_MAXIOFFSET(mp);
	}
	return last_byte;
}
#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
	int		tag,
	xfs_inode_t	*ip,
	int		flag,
	xfs_fsize_t	new_size,
	xfs_off_t	toss_start,
	xfs_off_t	toss_finish)
{
	if (ip->i_rwtrace == NULL) {
		return;
	}

	ktrace_enter(ip->i_rwtrace,
		     (void*)((long)tag),
		     (void*)ip,
		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
		     (void*)((long)flag),
		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
		     (void*)(unsigned long)(new_size & 0xffffffff),
		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_start & 0xffffffff),
		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
		     (void*)(unsigned long)(toss_finish & 0xffffffff),
		     (void*)(unsigned long)current_cpu(),
		     (void*)(unsigned long)current_pid(),
		     (void*)NULL,
		     (void*)NULL,
		     (void*)NULL);
}
#else
#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif
/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate. This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock. Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
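/*
 * Typical calling sequence (a sketch for orientation only, not code from
 * this file): take the iolock exclusively, call xfs_itruncate_start(),
 * then take the ilock within a transaction and call xfs_itruncate_finish():
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, new_size);
 *	... reserve a transaction, xfs_ilock(ip, XFS_ILOCK_EXCL) ...
 *	error = xfs_itruncate_finish(&tp, ip, new_size, XFS_DATA_FORK, sync);
 */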
int
xfs_itruncate_start(
	xfs_inode_t	*ip,
	uint		flags,
	xfs_fsize_t	new_size)
{
	xfs_fsize_t	last_byte;
	xfs_off_t	toss_start;
	xfs_mount_t	*mp;
	int		error = 0;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
	       (flags == XFS_ITRUNC_MAYBE));

	mp = ip->i_mount;
	/* wait for the completion of any pending DIOs */
	if (new_size < ip->i_size)
		vn_iowait(ip);
	/*
	 * Call toss_pages or flushinval_pages to get rid of pages
	 * overlapping the region being removed.  We have to use
	 * the less efficient flushinval_pages in the case that the
	 * caller may not be able to finish the truncate without
	 * dropping the inode's I/O lock.  Make sure
	 * to catch any pages brought in by buffers overlapping
	 * the EOF by searching out beyond the isize by our
	 * block size. We round new_size up to a block boundary
	 * so that we don't toss things on the same block as
	 * new_size but before it.
	 *
	 * Before calling toss_page or flushinval_pages, make sure to
	 * call remapf() over the same region if the file is mapped.
	 * This frees up mapped file references to the pages in the
	 * given range and for the flushinval_pages case it ensures
	 * that we get the latest mapped changes flushed out.
	 */
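	/*
	 * Worked example of the rounding described above (illustrative
	 * numbers only): with 4096-byte blocks, truncating to new_size =
	 * 6000 rounds up to block 2, so toss_start becomes byte offset
	 * 8192 and the partially-kept block containing byte 6000 is not
	 * tossed.
	 */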
	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	toss_start = XFS_FSB_TO_B(mp, toss_start);
	if (toss_start < 0) {
		/*
		 * The place to start tossing is beyond our maximum
		 * file size, so there is no way that the data extended
		 * that far.
		 */
		return 0;
	}
= xfs_file_last_byte(ip
);
1442 xfs_itrunc_trace(XFS_ITRUNC_START
, ip
, flags
, new_size
, toss_start
,
1444 if (last_byte
> toss_start
) {
1445 if (flags
& XFS_ITRUNC_DEFINITE
) {
1446 xfs_tosspages(ip
, toss_start
,
1447 -1, FI_REMAPF_LOCKED
);
1449 error
= xfs_flushinval_pages(ip
, toss_start
,
1450 -1, FI_REMAPF_LOCKED
);
1455 if (new_size
== 0) {
1456 ASSERT(VN_CACHED(VFS_I(ip
)) == 0);
/*
 * Shrink the file to the given new_size.  The new size must be smaller than
 * the current size.  This will free up the underlying blocks in the removed
 * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
 * indicates the fork which is to be truncated.  For the attribute fork we only
 * support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first transaction
 * we perform might have to be synchronous.  For the attr fork, it needs to be
 * so if the unlink of the inode is not yet known to be permanent in the log.
 * This keeps us from freeing and reusing the blocks of the attribute fork
 * before the unlink of the inode becomes permanent.
 *
 * For the data fork, we normally have to run synchronously if we're being
 * called out of the inactive path or we're being called out of the create path
 * where we're truncating an existing file.  Either way, the truncate needs to
 * be sync so blocks don't reappear in the file with altered data in case of a
 * crash.  wsync filesystems can run the first case async because anything that
 * shrinks the inode has to run sync so by the time we're called here from
 * inactive, the inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're in a wsync
 * filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.  It gets
 * too hard for us to guess here which path we're being called out of just
 * based on inode state.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_finish(
	xfs_trans_t	**tp,
	xfs_inode_t	*ip,
	xfs_fsize_t	new_size,
	int		fork,
	int		sync)
{
	xfs_fsblock_t	first_block;
	xfs_fileoff_t	first_unmap_block;
	xfs_fileoff_t	last_block;
	xfs_filblks_t	unmap_len = 0;
	xfs_mount_t	*mp;
	xfs_trans_t	*ntp;
	int		done;
	int		committed;
	xfs_bmap_free_t	free_list;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT((new_size == 0) || (new_size <= ip->i_size));
	ASSERT(*tp != NULL);
	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_transp == *tp);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);

	ntp = *tp;
	mp = (ntp)->t_mountp;
	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
	/*
	 * We only support truncating the entire attribute fork.
	 */
	if (fork == XFS_ATTR_FORK) {
		new_size = 0LL;
	}
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
	/*
	 * The first thing we do is set the size to new_size permanently
	 * on disk.  This way we don't have to worry about anyone ever
	 * being able to look at the data being freed even in the face
	 * of a crash.  What we're getting around here is the case where
	 * we free a block, it is allocated to another file, it is written
	 * to, and then we crash.  If the new data gets written to the
	 * file but the log buffers containing the free and reallocation
	 * don't, then we'd end up with garbage in the blocks being freed.
	 * As long as we make the new_size permanent before actually
	 * freeing any blocks it doesn't matter if they get written to.
	 *
	 * The callers must signal into us whether or not the size
	 * setting here must be synchronous.  There are a few cases
	 * where it doesn't have to be synchronous.  Those cases
	 * occur if the file is unlinked and we know the unlink is
	 * permanent or if the blocks being truncated are guaranteed
	 * to be beyond the inode eof (regardless of the link count)
	 * and the eof value is permanent.  Both of these cases occur
	 * only on wsync-mounted filesystems.  In those cases, we're
	 * guaranteed that no user will ever see the data in the blocks
	 * that are being truncated so the truncate can run async.
	 * In the free beyond eof case, the file may wind up with
	 * more blocks allocated to it than it needs if we crash
	 * and that won't get fixed until the next time the file
	 * is re-opened and closed but that's ok as that shouldn't
	 * be too many blocks.
	 *
	 * However, we can't just make all wsync xactions run async
	 * because there's one call out of the create path that needs
	 * to run sync where it's truncating an existing file to size
	 * 0 whose size is > 0.
	 *
	 * It's probably possible to come up with a test in this
	 * routine that would correctly distinguish all the above
	 * cases from the values of the function parameters and the
	 * inode state but for sanity's sake, I've decided to let the
	 * layers above just tell us.  It's simpler to correctly figure
	 * out in the layer above exactly under what conditions we
	 * can run async and I think it's easier for others to read and
	 * follow the logic in case something has to be changed.
	 * cscope is your friend -- rcc.
	 *
	 * The attribute fork is much simpler.
	 *
	 * For the attribute fork we allow the caller to tell us whether
	 * the unlink of the inode that led to this call is yet permanent
	 * in the on disk log.  If it is not and we will be freeing extents
	 * in this inode then we make the first transaction synchronous
	 * to make sure that the unlink is permanent by the time we free
	 * the blocks.
	 */
	if (fork == XFS_DATA_FORK) {
		if (ip->i_d.di_nextents > 0) {
			/*
			 * If we are not changing the file size then do
			 * not update the on-disk file size - we may be
			 * called from xfs_inactive_free_eofblocks().  If we
			 * update the on-disk file size and then the system
			 * crashes before the contents of the file are
			 * flushed to disk then the files may be full of
			 * holes (ie NULL files bug).
			 */
			if (ip->i_size != new_size) {
				ip->i_d.di_size = new_size;
				ip->i_size = new_size;
				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
			}
		}
	} else if (sync) {
		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
		if (ip->i_d.di_anextents > 0)
			xfs_trans_set_sync(ntp);
	}
	ASSERT(fork == XFS_DATA_FORK ||
		(fork == XFS_ATTR_FORK &&
			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	ASSERT(first_unmap_block <= last_block);
	done = 0;
	if (last_block == first_unmap_block) {
		done = 1;
	} else {
		unmap_len = last_block - first_unmap_block + 1;
	}
	while (!done) {
		/*
		 * Free up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
		 * will tell us whether it freed the entire range or
		 * not.  If this is a synchronous mount (wsync),
		 * then we can tell bunmapi to keep all the
		 * transactions asynchronous since the unlink
		 * transaction that made this inode inactive has
		 * already hit the disk.  There's no danger of
		 * the freed blocks being reused, there being a
		 * crash, and the reused blocks suddenly reappearing
		 * in this file with garbage in them once recovery
		 * runs.
		 */
		XFS_BMAP_INIT(&free_list, &first_block);
		error = xfs_bunmapi(ntp, ip,
				    first_unmap_block, unmap_len,
				    XFS_BMAPI_AFLAG(fork) |
				      (sync ? 0 : XFS_BMAPI_ASYNC),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
				    NULL, &done);
		if (error) {
			/*
			 * If the bunmapi call encounters an error,
			 * return to the caller where the transaction
			 * can be properly aborted.  We just need to
			 * make sure we're not holding any resources
			 * that we were not when we came in.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}
		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(tp, &free_list, &committed);
		ntp = *tp;
		if (committed) {
			/* link the inode into the next xact in the chain */
			xfs_trans_ijoin(ntp, ip,
					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
			xfs_trans_ihold(ntp, ip);
		}

		if (error) {
			/*
			 * If the bmap finish call encounters an error, return
			 * to the caller where the transaction can be properly
			 * aborted.  We just need to make sure we're not
			 * holding any resources that we were not when we came
			 * in.
			 *
			 * Aborting from this point might lose some blocks in
			 * the file system, but oh well.
			 */
			xfs_bmap_cancel(&free_list);
			return error;
		}
		/*
		 * Mark the inode dirty so it will be logged and
		 * moved forward in the log as part of every commit.
		 */
		xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);

		ntp = xfs_trans_dup(ntp);
		error = xfs_trans_commit(*tp, 0);
		*tp = ntp;

		/* link the inode into the next transaction in the chain */
		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
		xfs_trans_ihold(ntp, ip);

		if (error)
			return error;

		error = xfs_trans_reserve(ntp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);
		if (error)
			return error;
	}
	/*
	 * Only update the size in the case of the data fork, but
	 * always re-log the inode so that our permanent transaction
	 * can keep on rolling it forward in the log.
	 */
	if (fork == XFS_DATA_FORK) {
		xfs_isize_check(mp, ip, new_size);
		/*
		 * If we are not changing the file size then do
		 * not update the on-disk file size - we may be
		 * called from xfs_inactive_free_eofblocks().  If we
		 * update the on-disk file size and then the system
		 * crashes before the contents of the file are
		 * flushed to disk then the files may be full of
		 * holes (ie NULL files bug).
		 */
		if (ip->i_size != new_size) {
			ip->i_d.di_size = new_size;
			ip->i_size = new_size;
		}
	}
	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_delayed_blks == 0));
	ASSERT((new_size != 0) ||
	       (fork == XFS_ATTR_FORK) ||
	       (ip->i_d.di_nextents == 0));
	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
	return 0;
}
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
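/*
 * Worked example of the hashing used below (illustrative numbers only):
 * the AGI holds XFS_AGI_UNLINKED_BUCKETS (64) list heads, so an inode with
 * agino 1000 goes on bucket 1000 % 64 = 40 and is linked in at the head of
 * that bucket's singly linked di_next_unlinked chain.
 */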
STATIC int
xfs_iunlink(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_daddr_t	agdaddr;
	xfs_agino_t	agino;
	short		bucket_index;
	int		offset;
	int		error;
	int		agi_ok;

	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);
	ASSERT(ip->i_transp == tp);

	mp = tp->t_mountp;

	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
	if (error)
		return error;

	/*
	 * Validate the magic number of the agi block.
	 */
	agi = XFS_BUF_TO_AGI(agibp);
	agi_ok =
		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
			XFS_RANDOM_IUNLINK))) {
		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
		xfs_trans_brelse(tp, agibp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
		if (error)
			return error;

		ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
		/* both on-disk, don't endian flip twice */
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);
	}

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	ASSERT(agino != 0);
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
	return 0;
}
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
STATIC int
xfs_iunlink_remove(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_ino_t	next_ino;
	xfs_mount_t	*mp;
	xfs_agi_t	*agi;
	xfs_dinode_t	*dip;
	xfs_buf_t	*agibp;
	xfs_buf_t	*ibp;
	xfs_agnumber_t	agno;
	xfs_daddr_t	agdaddr;
	xfs_agino_t	agino;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	short		bucket_index;
	int		offset, last_offset = 0;
	int		error;
	int		agi_ok;
	mp = tp->t_mountp;

	/*
	 * First pull the on-disk inode from the AGI unlinked list.
	 */
	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 * on the list.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
	if (error) {
		cmn_err(CE_WARN,
			"xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s.  Returning error.",
			error, mp->m_fsname);
		return error;
	}
	/*
	 * Validate the magic number of the agi block.
	 */
	agi = XFS_BUF_TO_AGI(agibp);
	agi_ok =
		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
			XFS_RANDOM_IUNLINK_REMOVE))) {
		XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
				     mp, agi);
		xfs_trans_brelse(tp, agibp);
		cmn_err(CE_WARN,
			"xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s.  Returning EFSCORRUPTED.",
			mp->m_fsname);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	ASSERT(agino != 0);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
	ASSERT(agi->agi_unlinked[bucket_index]);
	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's
		 * on-disk buffer to see if there is anyone after us
		 * on the list.  Only modify our next pointer if it
		 * is not already NULLAGINO.  This saves us the overhead
		 * of dealing with the buffer when there is no need to
		 * change it.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
		if (error) {
			cmn_err(CE_WARN,
				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",
				error, mp->m_fsname);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
	} else {
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		last_ibp = NULL;
		while (next_agino != agino) {
			/*
			 * If the last inode wasn't the one pointing to
			 * us, then release its buffer since we're not
			 * going to do anything with it.
			 */
			if (last_ibp != NULL) {
				xfs_trans_brelse(tp, last_ibp);
			}
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
					    &last_ibp, &last_offset);
			if (error) {
				cmn_err(CE_WARN,
			"xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s.  Returning error.",
					error, mp->m_fsname);
				return error;
			}
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		}
		/*
		 * Now last_ibp points to the buffer previous to us on
		 * the unlinked list.  Pull us from the list.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
		if (error) {
			cmn_err(CE_WARN,
				"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s.  Returning error.",
				error, mp->m_fsname);
			return error;
		}
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
		} else {
			xfs_trans_brelse(tp, ibp);
		}
2025 * Point the previous inode on the list to the next inode.
2027 last_dip
->di_next_unlinked
= cpu_to_be32(next_agino
);
2028 ASSERT(next_agino
!= 0);
2029 offset
= last_offset
+ offsetof(xfs_dinode_t
, di_next_unlinked
);
2030 xfs_trans_inode_buf(tp
, last_ibp
);
2031 xfs_trans_log_buf(tp
, last_ibp
, offset
,
2032 (offset
+ sizeof(xfs_agino_t
) - 1));
2033 xfs_inobp_check(mp
, last_ibp
);
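
/*
 * xfs_ifree_cluster() below is called once an entire inode chunk has been
 * freed: each in-core inode belonging to the cluster is looked up in the
 * per-AG radix tree, locked and flush-locked where possible, and marked
 * XFS_ISTALE; inode log items already attached to the cluster buffer are
 * redirected to xfs_istale_done(); finally the cluster buffer itself is
 * invalidated in the transaction (xfs_trans_binval) so none of these
 * inodes is ever written back to the now-free disk blocks.
 */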
2040 xfs_inode_t
*free_ip
,
2044 xfs_mount_t
*mp
= free_ip
->i_mount
;
2045 int blks_per_cluster
;
2048 int i
, j
, found
, pre_flushed
;
2051 xfs_inode_t
*ip
, **ip_found
;
2052 xfs_inode_log_item_t
*iip
;
2053 xfs_log_item_t
*lip
;
2054 xfs_perag_t
*pag
= xfs_get_perag(mp
, inum
);
2056 if (mp
->m_sb
.sb_blocksize
>= XFS_INODE_CLUSTER_SIZE(mp
)) {
2057 blks_per_cluster
= 1;
2058 ninodes
= mp
->m_sb
.sb_inopblock
;
2059 nbufs
= XFS_IALLOC_BLOCKS(mp
);
2061 blks_per_cluster
= XFS_INODE_CLUSTER_SIZE(mp
) /
2062 mp
->m_sb
.sb_blocksize
;
2063 ninodes
= blks_per_cluster
* mp
->m_sb
.sb_inopblock
;
2064 nbufs
= XFS_IALLOC_BLOCKS(mp
) / blks_per_cluster
;
2067 ip_found
= kmem_alloc(ninodes
* sizeof(xfs_inode_t
*), KM_NOFS
);
2069 for (j
= 0; j
< nbufs
; j
++, inum
+= ninodes
) {
2070 blkno
= XFS_AGB_TO_DADDR(mp
, XFS_INO_TO_AGNO(mp
, inum
),
2071 XFS_INO_TO_AGBNO(mp
, inum
));
		/*
		 * Look for each inode in memory and attempt to lock it;
		 * we can be racing with flush and tail pushing here.
		 * Any inode we get the locks on is added to an array of
		 * inode items to process later.
		 *
		 * If we get the buffer lock first, we could beat a flush
		 * or tail pushing thread to the lock here, in which
		 * case they will go looking for the inode buffer
		 * and fail; we need some other form of interlock
		 * here.
		 */
2087 for (i
= 0; i
< ninodes
; i
++) {
2088 read_lock(&pag
->pag_ici_lock
);
2089 ip
= radix_tree_lookup(&pag
->pag_ici_root
,
2090 XFS_INO_TO_AGINO(mp
, (inum
+ i
)));
2092 /* Inode not in memory or we found it already,
2095 if (!ip
|| xfs_iflags_test(ip
, XFS_ISTALE
)) {
2096 read_unlock(&pag
->pag_ici_lock
);
2100 if (xfs_inode_clean(ip
)) {
2101 read_unlock(&pag
->pag_ici_lock
);
2105 /* If we can get the locks then add it to the
2106 * list, otherwise by the time we get the bp lock
2107 * below it will already be attached to the
2111 /* This inode will already be locked - by us, lets
2115 if (ip
== free_ip
) {
2116 if (xfs_iflock_nowait(ip
)) {
2117 xfs_iflags_set(ip
, XFS_ISTALE
);
2118 if (xfs_inode_clean(ip
)) {
2121 ip_found
[found
++] = ip
;
2124 read_unlock(&pag
->pag_ici_lock
);
2128 if (xfs_ilock_nowait(ip
, XFS_ILOCK_EXCL
)) {
2129 if (xfs_iflock_nowait(ip
)) {
2130 xfs_iflags_set(ip
, XFS_ISTALE
);
2132 if (xfs_inode_clean(ip
)) {
2134 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);
2136 ip_found
[found
++] = ip
;
2139 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);
2142 read_unlock(&pag
->pag_ici_lock
);
2145 bp
= xfs_trans_get_buf(tp
, mp
->m_ddev_targp
, blkno
,
2146 mp
->m_bsize
* blks_per_cluster
,
2150 lip
= XFS_BUF_FSPRIVATE(bp
, xfs_log_item_t
*);
2152 if (lip
->li_type
== XFS_LI_INODE
) {
2153 iip
= (xfs_inode_log_item_t
*)lip
;
2154 ASSERT(iip
->ili_logged
== 1);
2155 lip
->li_cb
= (void(*)(xfs_buf_t
*,xfs_log_item_t
*)) xfs_istale_done
;
2156 spin_lock(&mp
->m_ail_lock
);
2157 iip
->ili_flush_lsn
= iip
->ili_item
.li_lsn
;
2158 spin_unlock(&mp
->m_ail_lock
);
2159 xfs_iflags_set(iip
->ili_inode
, XFS_ISTALE
);
2162 lip
= lip
->li_bio_list
;
2165 for (i
= 0; i
< found
; i
++) {
2170 ip
->i_update_core
= 0;
2172 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);
2176 iip
->ili_last_fields
= iip
->ili_format
.ilf_fields
;
2177 iip
->ili_format
.ilf_fields
= 0;
2178 iip
->ili_logged
= 1;
2179 spin_lock(&mp
->m_ail_lock
);
2180 iip
->ili_flush_lsn
= iip
->ili_item
.li_lsn
;
2181 spin_unlock(&mp
->m_ail_lock
);
2183 xfs_buf_attach_iodone(bp
,
2184 (void(*)(xfs_buf_t
*,xfs_log_item_t
*))
2185 xfs_istale_done
, (xfs_log_item_t
*)iip
);
2186 if (ip
!= free_ip
) {
2187 xfs_iunlock(ip
, XFS_ILOCK_EXCL
);
2191 if (found
|| pre_flushed
)
2192 xfs_trans_stale_inode_buf(tp
, bp
);
2193 xfs_trans_binval(tp
, bp
);
2196 kmem_free(ip_found
);
2197 xfs_put_perag(mp
, pag
);
/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI.  We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
int
xfs_ifree(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_bmap_free_t	*flist)
{
	int		error;
	int		delete;
	xfs_ino_t	first_ino;
	xfs_dinode_t	*dip;
	xfs_buf_t	*ibp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_transp == tp);
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
	       ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);
	if (error != 0) {
		return error;
	}

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
	if (error != 0) {
		return error;
	}
	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	ip->i_d.di_gen++;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
	if (error)
		return error;

	/*
	 * Clear the on-disk di_mode.  This is to prevent xfs_bulkstat
	 * from picking up this inode when it is reclaimed (its incore state
	 * initialized but not flushed to disk yet).  The in-core di_mode is
	 * already cleared and a corresponding transaction logged.
	 * The hack here just synchronizes the in-core to on-disk
	 * di_mode value in advance before the actual inode sync to disk.
	 * This is OK because the inode is already unlinked and would never
	 * change its di_mode again for this inode generation.
	 * This is a temporary hack that would require a proper fix
	 * in the future.
	 */
	dip->di_core.di_mode = 0;

	if (delete) {
		xfs_ifree_cluster(ip, tp, first_ino);
	}

	return 0;
}
/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * if we are adding records one will be allocated.  The caller must
 * also not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * rec_diff -- the change in the number of records, positive or negative,
 *	 requested for the if_broot array.
 */
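
/*
 * if_broot holds a btree block header followed by the record array and
 * then the pointer array.  The records stay butted up against the header,
 * so resizing the root only has to move the pointer array to its new
 * location; that is what the memmove()/memcpy() calls below take care of.
 */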
2310 xfs_bmbt_block_t
*new_broot
;
2317 * Handle the degenerate case quietly.
2319 if (rec_diff
== 0) {
2323 ifp
= XFS_IFORK_PTR(ip
, whichfork
);
2326 * If there wasn't any memory allocated before, just
2327 * allocate it now and get out.
2329 if (ifp
->if_broot_bytes
== 0) {
2330 new_size
= (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff
);
2331 ifp
->if_broot
= (xfs_bmbt_block_t
*)kmem_alloc(new_size
,
2333 ifp
->if_broot_bytes
= (int)new_size
;
2338 * If there is already an existing if_broot, then we need
2339 * to realloc() it and shift the pointers to their new
2340 * location. The records don't change location because
2341 * they are kept butted up against the btree block header.
2343 cur_max
= XFS_BMAP_BROOT_MAXRECS(ifp
->if_broot_bytes
);
2344 new_max
= cur_max
+ rec_diff
;
2345 new_size
= (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max
);
2346 ifp
->if_broot
= (xfs_bmbt_block_t
*)
2347 kmem_realloc(ifp
->if_broot
,
2349 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max
), /* old size */
2351 op
= (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp
->if_broot
, 1,
2352 ifp
->if_broot_bytes
);
2353 np
= (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp
->if_broot
, 1,
2355 ifp
->if_broot_bytes
= (int)new_size
;
2356 ASSERT(ifp
->if_broot_bytes
<=
2357 XFS_IFORK_SIZE(ip
, whichfork
) + XFS_BROOT_SIZE_ADJ
);
2358 memmove(np
, op
, cur_max
* (uint
)sizeof(xfs_dfsbno_t
));
2363 * rec_diff is less than 0. In this case, we are shrinking the
2364 * if_broot buffer. It must already exist. If we go to zero
2365 * records, just get rid of the root and clear the status bit.
2367 ASSERT((ifp
->if_broot
!= NULL
) && (ifp
->if_broot_bytes
> 0));
2368 cur_max
= XFS_BMAP_BROOT_MAXRECS(ifp
->if_broot_bytes
);
2369 new_max
= cur_max
+ rec_diff
;
2370 ASSERT(new_max
>= 0);
2372 new_size
= (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max
);
2376 new_broot
= (xfs_bmbt_block_t
*)kmem_alloc(new_size
, KM_SLEEP
);
2378 * First copy over the btree block header.
2380 memcpy(new_broot
, ifp
->if_broot
, sizeof(xfs_bmbt_block_t
));
2383 ifp
->if_flags
&= ~XFS_IFBROOT
;
2387 * Only copy the records and pointers if there are any.
2391 * First copy the records.
2393 op
= (char *)XFS_BMAP_BROOT_REC_ADDR(ifp
->if_broot
, 1,
2394 ifp
->if_broot_bytes
);
2395 np
= (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot
, 1,
2397 memcpy(np
, op
, new_max
* (uint
)sizeof(xfs_bmbt_rec_t
));
2400 * Then copy the pointers.
2402 op
= (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp
->if_broot
, 1,
2403 ifp
->if_broot_bytes
);
2404 np
= (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot
, 1,
2406 memcpy(np
, op
, new_max
* (uint
)sizeof(xfs_dfsbno_t
));
2408 kmem_free(ifp
->if_broot
);
2409 ifp
->if_broot
= new_broot
;
2410 ifp
->if_broot_bytes
= (int)new_size
;
2411 ASSERT(ifp
->if_broot_bytes
<=
2412 XFS_IFORK_SIZE(ip
, whichfork
) + XFS_BROOT_SIZE_ADJ
);
/*
 * This is called when the amount of space needed for if_data
 * is increased or decreased.  The change in size is indicated by
 * the number of bytes that need to be added or deleted in the
 * byte_diff parameter.
 *
 * If the amount of space needed has decreased below the size of the
 * inline buffer, then switch to using the inline buffer.  Otherwise,
 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
 * to what is needed.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *	 requested for the if_data array.
 */
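
/*
 * For a local-format fork, if_u1.if_data either points at the small
 * inline buffer embedded in the fork (if_u2.if_inline_data, with
 * if_real_bytes == 0) or at a separately allocated buffer whose
 * allocated length is recorded in if_real_bytes.  The logic below
 * moves the data between those two homes as byte_diff grows or
 * shrinks it.
 */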
2442 if (byte_diff
== 0) {
2446 ifp
= XFS_IFORK_PTR(ip
, whichfork
);
2447 new_size
= (int)ifp
->if_bytes
+ byte_diff
;
2448 ASSERT(new_size
>= 0);
2450 if (new_size
== 0) {
2451 if (ifp
->if_u1
.if_data
!= ifp
->if_u2
.if_inline_data
) {
2452 kmem_free(ifp
->if_u1
.if_data
);
2454 ifp
->if_u1
.if_data
= NULL
;
2456 } else if (new_size
<= sizeof(ifp
->if_u2
.if_inline_data
)) {
2458 * If the valid extents/data can fit in if_inline_ext/data,
2459 * copy them from the malloc'd vector and free it.
2461 if (ifp
->if_u1
.if_data
== NULL
) {
2462 ifp
->if_u1
.if_data
= ifp
->if_u2
.if_inline_data
;
2463 } else if (ifp
->if_u1
.if_data
!= ifp
->if_u2
.if_inline_data
) {
2464 ASSERT(ifp
->if_real_bytes
!= 0);
2465 memcpy(ifp
->if_u2
.if_inline_data
, ifp
->if_u1
.if_data
,
2467 kmem_free(ifp
->if_u1
.if_data
);
2468 ifp
->if_u1
.if_data
= ifp
->if_u2
.if_inline_data
;
2473 * Stuck with malloc/realloc.
2474 * For inline data, the underlying buffer must be
2475 * a multiple of 4 bytes in size so that it can be
2476 * logged and stay on word boundaries. We enforce
2479 real_size
= roundup(new_size
, 4);
2480 if (ifp
->if_u1
.if_data
== NULL
) {
2481 ASSERT(ifp
->if_real_bytes
== 0);
2482 ifp
->if_u1
.if_data
= kmem_alloc(real_size
, KM_SLEEP
);
2483 } else if (ifp
->if_u1
.if_data
!= ifp
->if_u2
.if_inline_data
) {
2485 * Only do the realloc if the underlying size
2486 * is really changing.
2488 if (ifp
->if_real_bytes
!= real_size
) {
2489 ifp
->if_u1
.if_data
=
2490 kmem_realloc(ifp
->if_u1
.if_data
,
2496 ASSERT(ifp
->if_real_bytes
== 0);
2497 ifp
->if_u1
.if_data
= kmem_alloc(real_size
, KM_SLEEP
);
2498 memcpy(ifp
->if_u1
.if_data
, ifp
->if_u2
.if_inline_data
,
2502 ifp
->if_real_bytes
= real_size
;
2503 ifp
->if_bytes
= new_size
;
2504 ASSERT(ifp
->if_bytes
<= XFS_IFORK_SIZE(ip
, whichfork
));
/*
 * Map inode to disk block and offset.
 *
 * mp -- the mount point structure for the current file system
 * tp -- the current transaction
 * ino -- the inode number of the inode to be located
 * imap -- this structure is filled in with the information necessary
 *	 to retrieve the given inode from disk
 * flags -- flags to pass to xfs_dilocate indicating whether or not
 *	 lookups in the inode btree were OK or not
 */
int
xfs_imap(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	xfs_imap_t	*imap,
	uint		flags)
{
	xfs_fsblock_t	fsbno;
	int		len;
	int		off;
	int		error;

	fsbno = imap->im_blkno ?
		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
	if (error)
		return error;

	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
	imap->im_len = XFS_FSB_TO_BB(mp, len);
	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
	imap->im_ioffset = (ushort)off;
	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
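
	/*
	 * xfs_dilocate() hands back the inode's position within its block
	 * in 'off': im_ioffset stores that index directly, and im_boffset
	 * is the same position converted to a byte offset by shifting by
	 * sb_inodelog (log2 of the inode size).
	 */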
	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return an error rather than calling
	 * read_buf and panicking when we get an error from the
	 * driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
			"(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
			" XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
			(unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return XFS_ERROR(EINVAL);
	}
	return 0;
}
2572 ifp
= XFS_IFORK_PTR(ip
, whichfork
);
2573 if (ifp
->if_broot
!= NULL
) {
2574 kmem_free(ifp
->if_broot
);
2575 ifp
->if_broot
= NULL
;
2579 * If the format is local, then we can't have an extents
2580 * array so just look for an inline data array. If we're
2581 * not local then we may or may not have an extents list,
2582 * so check and free it up if we do.
2584 if (XFS_IFORK_FORMAT(ip
, whichfork
) == XFS_DINODE_FMT_LOCAL
) {
2585 if ((ifp
->if_u1
.if_data
!= ifp
->if_u2
.if_inline_data
) &&
2586 (ifp
->if_u1
.if_data
!= NULL
)) {
2587 ASSERT(ifp
->if_real_bytes
!= 0);
2588 kmem_free(ifp
->if_u1
.if_data
);
2589 ifp
->if_u1
.if_data
= NULL
;
2590 ifp
->if_real_bytes
= 0;
2592 } else if ((ifp
->if_flags
& XFS_IFEXTENTS
) &&
2593 ((ifp
->if_flags
& XFS_IFEXTIREC
) ||
2594 ((ifp
->if_u1
.if_extents
!= NULL
) &&
2595 (ifp
->if_u1
.if_extents
!= ifp
->if_u2
.if_inline_ext
)))) {
2596 ASSERT(ifp
->if_real_bytes
!= 0);
2597 xfs_iext_destroy(ifp
);
2599 ASSERT(ifp
->if_u1
.if_extents
== NULL
||
2600 ifp
->if_u1
.if_extents
== ifp
->if_u2
.if_inline_ext
);
2601 ASSERT(ifp
->if_real_bytes
== 0);
2602 if (whichfork
== XFS_ATTR_FORK
) {
2603 kmem_zone_free(xfs_ifork_zone
, ip
->i_afp
);
2609 * This is called free all the memory associated with an inode.
2610 * It must free the inode itself and any buffers allocated for
2611 * if_extents/if_data and if_broot. It must also free the lock
2612 * associated with the inode.
2618 switch (ip
->i_d
.di_mode
& S_IFMT
) {
2622 xfs_idestroy_fork(ip
, XFS_DATA_FORK
);
2626 xfs_idestroy_fork(ip
, XFS_ATTR_FORK
);
2627 mrfree(&ip
->i_lock
);
2628 mrfree(&ip
->i_iolock
);
2629 freesema(&ip
->i_flock
);
2631 #ifdef XFS_INODE_TRACE
2632 ktrace_free(ip
->i_trace
);
2634 #ifdef XFS_BMAP_TRACE
2635 ktrace_free(ip
->i_xtrace
);
2637 #ifdef XFS_BMBT_TRACE
2638 ktrace_free(ip
->i_btrace
);
2641 ktrace_free(ip
->i_rwtrace
);
2643 #ifdef XFS_ILOCK_TRACE
2644 ktrace_free(ip
->i_lock_trace
);
2646 #ifdef XFS_DIR2_TRACE
2647 ktrace_free(ip
->i_dir_trace
);
2651 * Only if we are shutting down the fs will we see an
2652 * inode still in the AIL. If it is there, we should remove
2653 * it to prevent a use-after-free from occurring.
2655 xfs_mount_t
*mp
= ip
->i_mount
;
2656 xfs_log_item_t
*lip
= &ip
->i_itemp
->ili_item
;
2658 ASSERT(((lip
->li_flags
& XFS_LI_IN_AIL
) == 0) ||
2659 XFS_FORCED_SHUTDOWN(ip
->i_mount
));
2660 if (lip
->li_flags
& XFS_LI_IN_AIL
) {
2661 spin_lock(&mp
->m_ail_lock
);
2662 if (lip
->li_flags
& XFS_LI_IN_AIL
)
2663 xfs_trans_delete_ail(mp
, lip
);
2665 spin_unlock(&mp
->m_ail_lock
);
2667 xfs_inode_item_destroy(ip
);
2669 kmem_zone_free(xfs_inode_zone
, ip
);
/*
 * Increment the pin count of the given inode.  The count is kept in
 * the atomic i_pincount field.
 */
void
xfs_ipin(
	xfs_inode_t	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	atomic_inc(&ip->i_pincount);
}

/*
 * Decrement the pin count of the given inode, and wake up
 * anyone in xfs_iunpin_wait() if the count goes to 0.  The
 * inode must have been previously pinned with a call to xfs_ipin().
 */
void
xfs_iunpin(
	xfs_inode_t	*ip)
{
	ASSERT(atomic_read(&ip->i_pincount) > 0);

	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up(&ip->i_ipin_wait);
}

/*
 * This is called to unpin an inode. It can be directed to wait or to return
 * immediately without waiting for the inode to be unpinned.  The caller must
 * have the inode locked in at least shared mode so that the buffer cannot be
 * subsequently pinned once someone is waiting for it to be unpinned.
 */
STATIC void
__xfs_iunpin_wait(
	xfs_inode_t	*ip,
	int		wait)
{
	xfs_inode_log_item_t	*iip = ip->i_itemp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	if (atomic_read(&ip->i_pincount) == 0)
		return;

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
				iip->ili_last_lsn : 0, XFS_LOG_FORCE);
	if (wait)
		wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
}

void
xfs_iunpin_wait(
	xfs_inode_t	*ip)
{
	__xfs_iunpin_wait(ip, 1);
}

void
xfs_iunpin_nowait(
	xfs_inode_t	*ip)
{
	__xfs_iunpin_wait(ip, 0);
}
/*
 * xfs_iextents_copy()
 *
 * This is called to copy the REAL extents (as opposed to the delayed
 * allocation extents) from the inode into the given buffer.  It
 * returns the number of bytes copied into the buffer.
 *
 * If there are no delayed allocation extents, then we can just
 * memcpy() the extents into the buffer.  Otherwise, we need to
 * examine each extent in turn and skip those which are delayed.
 */
int
xfs_iextents_copy(
	xfs_inode_t		*ip,
	xfs_bmbt_rec_t		*dp,
	int			whichfork)
{
	int			copied;
	int			i;
	xfs_ifork_t		*ifp;
	int			nrecs;
	xfs_fsblock_t		start_block;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(ifp->if_bytes > 0);

	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);

	/*
	 * There are some delayed allocation extents in the
	 * inode, so copy the extents one at a time and skip
	 * the delayed ones.  There must be at least one
	 * non-delayed extent.
	 */
	copied = 0;
	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		start_block = xfs_bmbt_get_startblock(ep);
		if (ISNULLSTARTBLOCK(start_block)) {
			/*
			 * It's a delayed allocation extent, so skip it.
			 */
			continue;
		}

		/* Translate to on disk format */
		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
		dp++;
		copied++;
	}
	ASSERT(copied != 0);
	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));

	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}
/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time. While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
 */
2815 xfs_inode_log_item_t
*iip
,
2822 #ifdef XFS_TRANS_DEBUG
2825 static const short brootflag
[2] =
2826 { XFS_ILOG_DBROOT
, XFS_ILOG_ABROOT
};
2827 static const short dataflag
[2] =
2828 { XFS_ILOG_DDATA
, XFS_ILOG_ADATA
};
2829 static const short extflag
[2] =
2830 { XFS_ILOG_DEXT
, XFS_ILOG_AEXT
};
2834 ifp
= XFS_IFORK_PTR(ip
, whichfork
);
2836 * This can happen if we gave up in iformat in an error path,
2837 * for the attribute fork.
2840 ASSERT(whichfork
== XFS_ATTR_FORK
);
2843 cp
= XFS_DFORK_PTR(dip
, whichfork
);
2845 switch (XFS_IFORK_FORMAT(ip
, whichfork
)) {
2846 case XFS_DINODE_FMT_LOCAL
:
2847 if ((iip
->ili_format
.ilf_fields
& dataflag
[whichfork
]) &&
2848 (ifp
->if_bytes
> 0)) {
2849 ASSERT(ifp
->if_u1
.if_data
!= NULL
);
2850 ASSERT(ifp
->if_bytes
<= XFS_IFORK_SIZE(ip
, whichfork
));
2851 memcpy(cp
, ifp
->if_u1
.if_data
, ifp
->if_bytes
);
2855 case XFS_DINODE_FMT_EXTENTS
:
2856 ASSERT((ifp
->if_flags
& XFS_IFEXTENTS
) ||
2857 !(iip
->ili_format
.ilf_fields
& extflag
[whichfork
]));
2858 ASSERT((xfs_iext_get_ext(ifp
, 0) != NULL
) ||
2859 (ifp
->if_bytes
== 0));
2860 ASSERT((xfs_iext_get_ext(ifp
, 0) == NULL
) ||
2861 (ifp
->if_bytes
> 0));
2862 if ((iip
->ili_format
.ilf_fields
& extflag
[whichfork
]) &&
2863 (ifp
->if_bytes
> 0)) {
2864 ASSERT(XFS_IFORK_NEXTENTS(ip
, whichfork
) > 0);
2865 (void)xfs_iextents_copy(ip
, (xfs_bmbt_rec_t
*)cp
,
2870 case XFS_DINODE_FMT_BTREE
:
2871 if ((iip
->ili_format
.ilf_fields
& brootflag
[whichfork
]) &&
2872 (ifp
->if_broot_bytes
> 0)) {
2873 ASSERT(ifp
->if_broot
!= NULL
);
2874 ASSERT(ifp
->if_broot_bytes
<=
2875 (XFS_IFORK_SIZE(ip
, whichfork
) +
2876 XFS_BROOT_SIZE_ADJ
));
2877 xfs_bmbt_to_bmdr(ifp
->if_broot
, ifp
->if_broot_bytes
,
2878 (xfs_bmdr_block_t
*)cp
,
2879 XFS_DFORK_SIZE(dip
, mp
, whichfork
));
2883 case XFS_DINODE_FMT_DEV
:
2884 if (iip
->ili_format
.ilf_fields
& XFS_ILOG_DEV
) {
2885 ASSERT(whichfork
== XFS_DATA_FORK
);
2886 dip
->di_u
.di_dev
= cpu_to_be32(ip
->i_df
.if_u2
.if_rdev
);
2890 case XFS_DINODE_FMT_UUID
:
2891 if (iip
->ili_format
.ilf_fields
& XFS_ILOG_UUID
) {
2892 ASSERT(whichfork
== XFS_DATA_FORK
);
2893 memcpy(&dip
->di_u
.di_muuid
, &ip
->i_df
.if_u2
.if_uuid
,
2909 xfs_mount_t
*mp
= ip
->i_mount
;
2910 xfs_perag_t
*pag
= xfs_get_perag(mp
, ip
->i_ino
);
2911 unsigned long first_index
, mask
;
2912 unsigned long inodes_per_cluster
;
2914 xfs_inode_t
**ilist
;
2921 ASSERT(pag
->pagi_inodeok
);
2922 ASSERT(pag
->pag_ici_init
);
2924 inodes_per_cluster
= XFS_INODE_CLUSTER_SIZE(mp
) >> mp
->m_sb
.sb_inodelog
;
2925 ilist_size
= inodes_per_cluster
* sizeof(xfs_inode_t
*);
2926 ilist
= kmem_alloc(ilist_size
, KM_MAYFAIL
|KM_NOFS
);
2930 mask
= ~(((XFS_INODE_CLUSTER_SIZE(mp
) >> mp
->m_sb
.sb_inodelog
)) - 1);
2931 first_index
= XFS_INO_TO_AGINO(mp
, ip
->i_ino
) & mask
;
2932 read_lock(&pag
->pag_ici_lock
);
2933 /* really need a gang lookup range call here */
2934 nr_found
= radix_tree_gang_lookup(&pag
->pag_ici_root
, (void**)ilist
,
2935 first_index
, inodes_per_cluster
);
2939 for (i
= 0; i
< nr_found
; i
++) {
2943 /* if the inode lies outside this cluster, we're done. */
2944 if ((XFS_INO_TO_AGINO(mp
, iq
->i_ino
) & mask
) != first_index
)
2947 * Do an un-protected check to see if the inode is dirty and
2948 * is a candidate for flushing. These checks will be repeated
2949 * later after the appropriate locks are acquired.
2951 if (xfs_inode_clean(iq
) && xfs_ipincount(iq
) == 0)
2955 * Try to get locks. If any are unavailable or it is pinned,
2956 * then this inode cannot be flushed and is skipped.
2959 if (!xfs_ilock_nowait(iq
, XFS_ILOCK_SHARED
))
2961 if (!xfs_iflock_nowait(iq
)) {
2962 xfs_iunlock(iq
, XFS_ILOCK_SHARED
);
2965 if (xfs_ipincount(iq
)) {
2967 xfs_iunlock(iq
, XFS_ILOCK_SHARED
);
2972 * arriving here means that this inode can be flushed. First
2973 * re-check that it's dirty before flushing.
2975 if (!xfs_inode_clean(iq
)) {
2977 error
= xfs_iflush_int(iq
, bp
);
2979 xfs_iunlock(iq
, XFS_ILOCK_SHARED
);
2980 goto cluster_corrupt_out
;
2986 xfs_iunlock(iq
, XFS_ILOCK_SHARED
);
2990 XFS_STATS_INC(xs_icluster_flushcnt
);
2991 XFS_STATS_ADD(xs_icluster_flushinode
, clcount
);
2995 read_unlock(&pag
->pag_ici_lock
);
3000 cluster_corrupt_out
:
3002 * Corruption detected in the clustering loop. Invalidate the
3003 * inode buffer and shut down the filesystem.
3005 read_unlock(&pag
->pag_ici_lock
);
3007 * Clean up the buffer. If it was B_DELWRI, just release it --
3008 * brelse can handle it with no problems. If not, shut down the
3009 * filesystem before releasing the buffer.
3011 bufwasdelwri
= XFS_BUF_ISDELAYWRITE(bp
);
3015 xfs_force_shutdown(mp
, SHUTDOWN_CORRUPT_INCORE
);
3017 if (!bufwasdelwri
) {
3019 * Just like incore_relse: if we have b_iodone functions,
3020 * mark the buffer as an error and call them. Otherwise
3021 * mark it as stale and brelse.
3023 if (XFS_BUF_IODONE_FUNC(bp
)) {
3024 XFS_BUF_CLR_BDSTRAT_FUNC(bp
);
3028 XFS_BUF_ERROR(bp
,EIO
);
3037 * Unlocks the flush lock
3039 xfs_iflush_abort(iq
);
3041 return XFS_ERROR(EFSCORRUPTED
);
/*
 * xfs_iflush() will write a modified inode's changes out to the
 * inode's on disk home.  The caller must have the inode lock held
 * in at least shared mode and the inode flush semaphore must be
 * held as well.  The inode lock will still be held upon return from
 * the call and the caller is free to unlock it.
 * The inode flush lock will be unlocked when the inode reaches the disk.
 * The flags indicate how the inode's buffer should be written out.
 */
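
/*
 * The flag handling in the body below follows the comment there: when
 * the inode is still dirty in the log (it is in the AIL and needs to be
 * pushed out soon), the XFS_IFLUSH_DELWRI_ELSE_{SYNC,ASYNC} variants
 * fall back to a synchronous or asynchronous write respectively, while
 * plain XFS_IFLUSH_DELWRI keeps the delayed write.  When the inode is
 * not in that state the *_ELSE_* variants and XFS_IFLUSH_DELWRI all use
 * a delayed write; XFS_IFLUSH_SYNC and XFS_IFLUSH_ASYNC(_NOBLOCK) always
 * pick a synchronous or asynchronous write.
 */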
3058 xfs_inode_log_item_t
*iip
;
3063 int noblock
= (flags
== XFS_IFLUSH_ASYNC_NOBLOCK
);
3064 enum { INT_DELWRI
= (1 << 0), INT_ASYNC
= (1 << 1) };
3066 XFS_STATS_INC(xs_iflush_count
);
3068 ASSERT(xfs_isilocked(ip
, XFS_ILOCK_EXCL
|XFS_ILOCK_SHARED
));
3069 ASSERT(issemalocked(&(ip
->i_flock
)));
3070 ASSERT(ip
->i_d
.di_format
!= XFS_DINODE_FMT_BTREE
||
3071 ip
->i_d
.di_nextents
> ip
->i_df
.if_ext_max
);
3077 * If the inode isn't dirty, then just release the inode
3078 * flush lock and do nothing.
3080 if (xfs_inode_clean(ip
)) {
3086 * We can't flush the inode until it is unpinned, so wait for it if we
3087 * are allowed to block. We know noone new can pin it, because we are
3088 * holding the inode lock shared and you need to hold it exclusively to
3091 * If we are not allowed to block, force the log out asynchronously so
3092 * that when we come back the inode will be unpinned. If other inodes
3093 * in the same cluster are dirty, they will probably write the inode
3094 * out for us if they occur after the log force completes.
3096 if (noblock
&& xfs_ipincount(ip
)) {
3097 xfs_iunpin_nowait(ip
);
3101 xfs_iunpin_wait(ip
);
3104 * This may have been unpinned because the filesystem is shutting
3105 * down forcibly. If that's the case we must not write this inode
3106 * to disk, because the log record didn't make it to disk!
3108 if (XFS_FORCED_SHUTDOWN(mp
)) {
3109 ip
->i_update_core
= 0;
3111 iip
->ili_format
.ilf_fields
= 0;
3113 return XFS_ERROR(EIO
);
3117 * Decide how buffer will be flushed out. This is done before
3118 * the call to xfs_iflush_int because this field is zeroed by it.
3120 if (iip
!= NULL
&& iip
->ili_format
.ilf_fields
!= 0) {
3122 * Flush out the inode buffer according to the directions
3123 * of the caller. In the cases where the caller has given
3124 * us a choice choose the non-delwri case. This is because
3125 * the inode is in the AIL and we need to get it out soon.
3128 case XFS_IFLUSH_SYNC
:
3129 case XFS_IFLUSH_DELWRI_ELSE_SYNC
:
3132 case XFS_IFLUSH_ASYNC_NOBLOCK
:
3133 case XFS_IFLUSH_ASYNC
:
3134 case XFS_IFLUSH_DELWRI_ELSE_ASYNC
:
3137 case XFS_IFLUSH_DELWRI
:
3147 case XFS_IFLUSH_DELWRI_ELSE_SYNC
:
3148 case XFS_IFLUSH_DELWRI_ELSE_ASYNC
:
3149 case XFS_IFLUSH_DELWRI
:
3152 case XFS_IFLUSH_ASYNC_NOBLOCK
:
3153 case XFS_IFLUSH_ASYNC
:
3156 case XFS_IFLUSH_SYNC
:
3167 * Get the buffer containing the on-disk inode.
3169 error
= xfs_itobp(mp
, NULL
, ip
, &dip
, &bp
, 0, 0,
3170 noblock
? XFS_BUF_TRYLOCK
: XFS_BUF_LOCK
);
3177 * First flush out the inode that xfs_iflush was called with.
3179 error
= xfs_iflush_int(ip
, bp
);
3184 * If the buffer is pinned then push on the log now so we won't
3185 * get stuck waiting in the write for too long.
3187 if (XFS_BUF_ISPINNED(bp
))
3188 xfs_log_force(mp
, (xfs_lsn_t
)0, XFS_LOG_FORCE
);
3192 * see if other inodes can be gathered into this write
3194 error
= xfs_iflush_cluster(ip
, bp
);
3196 goto cluster_corrupt_out
;
3198 if (flags
& INT_DELWRI
) {
3199 xfs_bdwrite(mp
, bp
);
3200 } else if (flags
& INT_ASYNC
) {
3201 error
= xfs_bawrite(mp
, bp
);
3203 error
= xfs_bwrite(mp
, bp
);
3209 xfs_force_shutdown(mp
, SHUTDOWN_CORRUPT_INCORE
);
3210 cluster_corrupt_out
:
3212 * Unlocks the flush lock
3214 xfs_iflush_abort(ip
);
3215 return XFS_ERROR(EFSCORRUPTED
);
3224 xfs_inode_log_item_t
*iip
;
3227 #ifdef XFS_TRANS_DEBUG
3231 ASSERT(xfs_isilocked(ip
, XFS_ILOCK_EXCL
|XFS_ILOCK_SHARED
));
3232 ASSERT(issemalocked(&(ip
->i_flock
)));
3233 ASSERT(ip
->i_d
.di_format
!= XFS_DINODE_FMT_BTREE
||
3234 ip
->i_d
.di_nextents
> ip
->i_df
.if_ext_max
);
3241 * If the inode isn't dirty, then just release the inode
3242 * flush lock and do nothing.
3244 if (xfs_inode_clean(ip
)) {
3249 /* set *dip = inode's place in the buffer */
3250 dip
= (xfs_dinode_t
*)xfs_buf_offset(bp
, ip
->i_boffset
);
3253 * Clear i_update_core before copying out the data.
3254 * This is for coordination with our timestamp updates
3255 * that don't hold the inode lock. They will always
3256 * update the timestamps BEFORE setting i_update_core,
3257 * so if we clear i_update_core after they set it we
3258 * are guaranteed to see their updates to the timestamps.
3259 * I believe that this depends on strongly ordered memory
3260 * semantics, but we have that. We use the SYNCHRONIZE
3261 * macro to make sure that the compiler does not reorder
3262 * the i_update_core access below the data copy below.
3264 ip
->i_update_core
= 0;
3268 * Make sure to get the latest atime from the Linux inode.
3270 xfs_synchronize_atime(ip
);
3272 if (XFS_TEST_ERROR(be16_to_cpu(dip
->di_core
.di_magic
) != XFS_DINODE_MAGIC
,
3273 mp
, XFS_ERRTAG_IFLUSH_1
, XFS_RANDOM_IFLUSH_1
)) {
3274 xfs_cmn_err(XFS_PTAG_IFLUSH
, CE_ALERT
, mp
,
3275 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3276 ip
->i_ino
, be16_to_cpu(dip
->di_core
.di_magic
), dip
);
3279 if (XFS_TEST_ERROR(ip
->i_d
.di_magic
!= XFS_DINODE_MAGIC
,
3280 mp
, XFS_ERRTAG_IFLUSH_2
, XFS_RANDOM_IFLUSH_2
)) {
3281 xfs_cmn_err(XFS_PTAG_IFLUSH
, CE_ALERT
, mp
,
3282 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3283 ip
->i_ino
, ip
, ip
->i_d
.di_magic
);
3286 if ((ip
->i_d
.di_mode
& S_IFMT
) == S_IFREG
) {
3288 (ip
->i_d
.di_format
!= XFS_DINODE_FMT_EXTENTS
) &&
3289 (ip
->i_d
.di_format
!= XFS_DINODE_FMT_BTREE
),
3290 mp
, XFS_ERRTAG_IFLUSH_3
, XFS_RANDOM_IFLUSH_3
)) {
3291 xfs_cmn_err(XFS_PTAG_IFLUSH
, CE_ALERT
, mp
,
3292 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3296 } else if ((ip
->i_d
.di_mode
& S_IFMT
) == S_IFDIR
) {
3298 (ip
->i_d
.di_format
!= XFS_DINODE_FMT_EXTENTS
) &&
3299 (ip
->i_d
.di_format
!= XFS_DINODE_FMT_BTREE
) &&
3300 (ip
->i_d
.di_format
!= XFS_DINODE_FMT_LOCAL
),
3301 mp
, XFS_ERRTAG_IFLUSH_4
, XFS_RANDOM_IFLUSH_4
)) {
3302 xfs_cmn_err(XFS_PTAG_IFLUSH
, CE_ALERT
, mp
,
3303 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3308 if (XFS_TEST_ERROR(ip
->i_d
.di_nextents
+ ip
->i_d
.di_anextents
>
3309 ip
->i_d
.di_nblocks
, mp
, XFS_ERRTAG_IFLUSH_5
,
3310 XFS_RANDOM_IFLUSH_5
)) {
3311 xfs_cmn_err(XFS_PTAG_IFLUSH
, CE_ALERT
, mp
,
3312 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3314 ip
->i_d
.di_nextents
+ ip
->i_d
.di_anextents
,
3319 if (XFS_TEST_ERROR(ip
->i_d
.di_forkoff
> mp
->m_sb
.sb_inodesize
,
3320 mp
, XFS_ERRTAG_IFLUSH_6
, XFS_RANDOM_IFLUSH_6
)) {
3321 xfs_cmn_err(XFS_PTAG_IFLUSH
, CE_ALERT
, mp
,
3322 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3323 ip
->i_ino
, ip
->i_d
.di_forkoff
, ip
);
3327 * bump the flush iteration count, used to detect flushes which
3328 * postdate a log record during recovery.
3331 ip
->i_d
.di_flushiter
++;
3334 * Copy the dirty parts of the inode into the on-disk
3335 * inode. We always copy out the core of the inode,
3336 * because if the inode is dirty at all the core must
3339 xfs_dinode_to_disk(&dip
->di_core
, &ip
->i_d
);
3341 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3342 if (ip
->i_d
.di_flushiter
== DI_MAX_FLUSH
)
3343 ip
->i_d
.di_flushiter
= 0;
3346 * If this is really an old format inode and the superblock version
3347 * has not been updated to support only new format inodes, then
3348 * convert back to the old inode format. If the superblock version
3349 * has been updated, then make the conversion permanent.
3351 ASSERT(ip
->i_d
.di_version
== XFS_DINODE_VERSION_1
||
3352 xfs_sb_version_hasnlink(&mp
->m_sb
));
3353 if (ip
->i_d
.di_version
== XFS_DINODE_VERSION_1
) {
3354 if (!xfs_sb_version_hasnlink(&mp
->m_sb
)) {
3358 ASSERT(ip
->i_d
.di_nlink
<= XFS_MAXLINK_1
);
3359 dip
->di_core
.di_onlink
= cpu_to_be16(ip
->i_d
.di_nlink
);
3362 * The superblock version has already been bumped,
3363 * so just make the conversion to the new inode
3366 ip
->i_d
.di_version
= XFS_DINODE_VERSION_2
;
3367 dip
->di_core
.di_version
= XFS_DINODE_VERSION_2
;
3368 ip
->i_d
.di_onlink
= 0;
3369 dip
->di_core
.di_onlink
= 0;
3370 memset(&(ip
->i_d
.di_pad
[0]), 0, sizeof(ip
->i_d
.di_pad
));
3371 memset(&(dip
->di_core
.di_pad
[0]), 0,
3372 sizeof(dip
->di_core
.di_pad
));
3373 ASSERT(ip
->i_d
.di_projid
== 0);
3377 xfs_iflush_fork(ip
, dip
, iip
, XFS_DATA_FORK
, bp
);
3378 if (XFS_IFORK_Q(ip
))
3379 xfs_iflush_fork(ip
, dip
, iip
, XFS_ATTR_FORK
, bp
);
3380 xfs_inobp_check(mp
, bp
);
3383 * We've recorded everything logged in the inode, so we'd
3384 * like to clear the ilf_fields bits so we don't log and
3385 * flush things unnecessarily. However, we can't stop
3386 * logging all this information until the data we've copied
3387 * into the disk buffer is written to disk. If we did we might
3388 * overwrite the copy of the inode in the log with all the
3389 * data after re-logging only part of it, and in the face of
3390 * a crash we wouldn't have all the data we need to recover.
3392 * What we do is move the bits to the ili_last_fields field.
3393 * When logging the inode, these bits are moved back to the
3394 * ilf_fields field. In the xfs_iflush_done() routine we
3395 * clear ili_last_fields, since we know that the information
3396 * those bits represent is permanently on disk. As long as
3397 * the flush completes before the inode is logged again, then
3398 * both ilf_fields and ili_last_fields will be cleared.
3400 * We can play with the ilf_fields bits here, because the inode
3401 * lock must be held exclusively in order to set bits there
3402 * and the flush lock protects the ili_last_fields bits.
3403 * Set ili_logged so the flush done
3404 * routine can tell whether or not to look in the AIL.
3405 * Also, store the current LSN of the inode so that we can tell
3406 * whether the item has moved in the AIL from xfs_iflush_done().
3407 * In order to read the lsn we need the AIL lock, because
3408 * it is a 64 bit value that cannot be read atomically.
3410 if (iip
!= NULL
&& iip
->ili_format
.ilf_fields
!= 0) {
3411 iip
->ili_last_fields
= iip
->ili_format
.ilf_fields
;
3412 iip
->ili_format
.ilf_fields
= 0;
3413 iip
->ili_logged
= 1;
3415 ASSERT(sizeof(xfs_lsn_t
) == 8); /* don't lock if it shrinks */
3416 spin_lock(&mp
->m_ail_lock
);
3417 iip
->ili_flush_lsn
= iip
->ili_item
.li_lsn
;
3418 spin_unlock(&mp
->m_ail_lock
);
3421 * Attach the function xfs_iflush_done to the inode's
3422 * buffer. This will remove the inode from the AIL
3423 * and unlock the inode's flush lock when the inode is
3424 * completely written to disk.
3426 xfs_buf_attach_iodone(bp
, (void(*)(xfs_buf_t
*,xfs_log_item_t
*))
3427 xfs_iflush_done
, (xfs_log_item_t
*)iip
);
3429 ASSERT(XFS_BUF_FSPRIVATE(bp
, void *) != NULL
);
3430 ASSERT(XFS_BUF_IODONE_FUNC(bp
) != NULL
);
3433 * We're flushing an inode which is not in the AIL and has
3434 * not been logged but has i_update_core set. For this
3435 * case we can use a B_DELWRI flush and immediately drop
3436 * the inode flush lock because we can avoid the whole
3437 * AIL state thing. It's OK to drop the flush lock now,
3438 * because we've already locked the buffer and to do anything
3439 * you really need both.
3442 ASSERT(iip
->ili_logged
== 0);
3443 ASSERT(iip
->ili_last_fields
== 0);
3444 ASSERT((iip
->ili_item
.li_flags
& XFS_LI_IN_AIL
) == 0);
3452 return XFS_ERROR(EFSCORRUPTED
);
3457 * Flush all inactive inodes in mp.
3466 XFS_MOUNT_ILOCK(mp
);
3472 /* Make sure we skip markers inserted by sync */
3473 if (ip
->i_mount
== NULL
) {
3479 XFS_MOUNT_IUNLOCK(mp
);
3480 xfs_finish_reclaim(ip
, 0, XFS_IFLUSH_ASYNC
);
3484 ASSERT(vn_count(VFS_I(ip
)) == 0);
3487 } while (ip
!= mp
->m_inodes
);
3489 XFS_MOUNT_IUNLOCK(mp
);
3492 #ifdef XFS_ILOCK_TRACE
3493 ktrace_t
*xfs_ilock_trace_buf
;
3496 xfs_ilock_trace(xfs_inode_t
*ip
, int lock
, unsigned int lockflags
, inst_t
*ra
)
3498 ktrace_enter(ip
->i_lock_trace
,
3500 (void *)(unsigned long)lock
, /* 1 = LOCK, 3=UNLOCK, etc */
3501 (void *)(unsigned long)lockflags
, /* XFS_ILOCK_EXCL etc */
3502 (void *)ra
, /* caller of ilock */
3503 (void *)(unsigned long)current_cpu(),
3504 (void *)(unsigned long)current_pid(),
3505 NULL
,NULL
,NULL
,NULL
,NULL
,NULL
,NULL
,NULL
,NULL
,NULL
);
/*
 * Return a pointer to the extent record at file index idx.
 */
xfs_bmbt_rec_host_t *
xfs_iext_get_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx)		/* index of target extent */
{
	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
		return ifp->if_u1.if_ext_irec->er_extbuf;
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_ext_irec_t	*erp;		/* irec pointer */
		int		erp_idx = 0;	/* irec index */
		xfs_extnum_t	page_idx = idx;	/* ext index in target list */

		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
		return &erp->er_extbuf[page_idx];
	} else if (ifp->if_bytes) {
		return &ifp->if_u1.if_extents[idx];
	} else {
		return NULL;
	}
}
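
/*
 * The usual caller pattern (see xfs_iextents_copy() above) walks a fork
 * by index and decodes each packed record in place, e.g. (illustrative
 * sketch only):
 *
 *	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
 *	for (i = 0; i < nrecs; i++) {
 *		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
 *		startoff = xfs_bmbt_get_startoff(ep);
 *		blockcount = xfs_bmbt_get_blockcount(ep);
 *		...
 *	}
 */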
/*
 * Insert new item(s) into the extent records for incore inode
 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
 */
void
xfs_iext_insert(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* starting index of new items */
	xfs_extnum_t	count,		/* number of inserted items */
	xfs_bmbt_irec_t	*new)		/* items to insert */
{
	xfs_extnum_t	i;		/* extent record index */

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_add(ifp, idx, count);
	for (i = idx; i < idx + count; i++, new++)
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
}
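
/*
 * The routines below manage the three incore extent list formats used by
 * an inode fork: the inline buffer if_u2.if_inline_ext (up to
 * XFS_INLINE_EXTS records held inside the fork itself), a single directly
 * allocated list in if_u1.if_extents (up to XFS_LINEAR_EXTS records), and
 * the indirection array if_u1.if_ext_irec, where each xfs_ext_irec_t
 * entry points at an XFS_IEXT_BUFSZ extent page.  if_real_bytes is the
 * allocated size (0 while the inline buffer is in use) and if_bytes is
 * the number of bytes actually holding extent records.
 */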
/*
 * This is called when the amount of space required for incore file
 * extents needs to be increased.  The ext_diff parameter stores the
 * number of new extents being added and the idx parameter contains
 * the extent index where the new extents will be added.  If the new
 * extents are being appended, then we just need to (re)allocate and
 * initialize the space.  Otherwise, if the new extents are being
 * inserted into the middle of the existing entries, a bit more work
 * is required to make room for the new extents to be inserted.  The
 * caller is responsible for filling in the new extent entries upon
 * return.
 */
void
xfs_iext_add(
3567 xfs_ifork_t
*ifp
, /* inode fork pointer */
3568 xfs_extnum_t idx
, /* index to begin adding exts */
3569 int ext_diff
) /* number of extents to add */
3571 int byte_diff
; /* new bytes being added */
3572 int new_size
; /* size of extents after adding */
3573 xfs_extnum_t nextents
; /* number of extents in file */
3575 nextents
= ifp
->if_bytes
/ (uint
)sizeof(xfs_bmbt_rec_t
);
3576 ASSERT((idx
>= 0) && (idx
<= nextents
));
3577 byte_diff
= ext_diff
* sizeof(xfs_bmbt_rec_t
);
3578 new_size
= ifp
->if_bytes
+ byte_diff
;
3580 * If the new number of extents (nextents + ext_diff)
3581 * fits inside the inode, then continue to use the inline
3584 if (nextents
+ ext_diff
<= XFS_INLINE_EXTS
) {
3585 if (idx
< nextents
) {
3586 memmove(&ifp
->if_u2
.if_inline_ext
[idx
+ ext_diff
],
3587 &ifp
->if_u2
.if_inline_ext
[idx
],
3588 (nextents
- idx
) * sizeof(xfs_bmbt_rec_t
));
3589 memset(&ifp
->if_u2
.if_inline_ext
[idx
], 0, byte_diff
);
3591 ifp
->if_u1
.if_extents
= ifp
->if_u2
.if_inline_ext
;
3592 ifp
->if_real_bytes
= 0;
3593 ifp
->if_lastex
= nextents
+ ext_diff
;
3596 * Otherwise use a linear (direct) extent list.
3597 * If the extents are currently inside the inode,
3598 * xfs_iext_realloc_direct will switch us from
3599 * inline to direct extent allocation mode.
3601 else if (nextents
+ ext_diff
<= XFS_LINEAR_EXTS
) {
3602 xfs_iext_realloc_direct(ifp
, new_size
);
3603 if (idx
< nextents
) {
3604 memmove(&ifp
->if_u1
.if_extents
[idx
+ ext_diff
],
3605 &ifp
->if_u1
.if_extents
[idx
],
3606 (nextents
- idx
) * sizeof(xfs_bmbt_rec_t
));
3607 memset(&ifp
->if_u1
.if_extents
[idx
], 0, byte_diff
);
3610 /* Indirection array */
3612 xfs_ext_irec_t
*erp
;
3616 ASSERT(nextents
+ ext_diff
> XFS_LINEAR_EXTS
);
3617 if (ifp
->if_flags
& XFS_IFEXTIREC
) {
3618 erp
= xfs_iext_idx_to_irec(ifp
, &page_idx
, &erp_idx
, 1);
3620 xfs_iext_irec_init(ifp
);
3621 ASSERT(ifp
->if_flags
& XFS_IFEXTIREC
);
3622 erp
= ifp
->if_u1
.if_ext_irec
;
3624 /* Extents fit in target extent page */
3625 if (erp
&& erp
->er_extcount
+ ext_diff
<= XFS_LINEAR_EXTS
) {
3626 if (page_idx
< erp
->er_extcount
) {
3627 memmove(&erp
->er_extbuf
[page_idx
+ ext_diff
],
3628 &erp
->er_extbuf
[page_idx
],
3629 (erp
->er_extcount
- page_idx
) *
3630 sizeof(xfs_bmbt_rec_t
));
3631 memset(&erp
->er_extbuf
[page_idx
], 0, byte_diff
);
3633 erp
->er_extcount
+= ext_diff
;
3634 xfs_iext_irec_update_extoffs(ifp
, erp_idx
+ 1, ext_diff
);
3636 /* Insert a new extent page */
3638 xfs_iext_add_indirect_multi(ifp
,
3639 erp_idx
, page_idx
, ext_diff
);
3642 * If extent(s) are being appended to the last page in
3643 * the indirection array and the new extent(s) don't fit
3644 * in the page, then erp is NULL and erp_idx is set to
3645 * the next index needed in the indirection array.
3648 int count
= ext_diff
;
3651 erp
= xfs_iext_irec_new(ifp
, erp_idx
);
3652 erp
->er_extcount
= count
;
3653 count
-= MIN(count
, (int)XFS_LINEAR_EXTS
);
3660 ifp
->if_bytes
= new_size
;
3664 * This is called when incore extents are being added to the indirection
3665 * array and the new extents do not fit in the target extent list. The
3666 * erp_idx parameter contains the irec index for the target extent list
3667 * in the indirection array, and the idx parameter contains the extent
3668 * index within the list. The number of extents being added is stored
3669 * in the count parameter.
3671 * |-------| |-------|
3672 * | | | | idx - number of extents before idx
3674 * | | | | count - number of extents being inserted at idx
3675 * |-------| |-------|
3676 * | count | | nex2 | nex2 - number of extents after idx + count
3677 * |-------| |-------|
3680 xfs_iext_add_indirect_multi(
3681 xfs_ifork_t
*ifp
, /* inode fork pointer */
3682 int erp_idx
, /* target extent irec index */
3683 xfs_extnum_t idx
, /* index within target list */
3684 int count
) /* new extents being added */
3686 int byte_diff
; /* new bytes being added */
3687 xfs_ext_irec_t
*erp
; /* pointer to irec entry */
3688 xfs_extnum_t ext_diff
; /* number of extents to add */
3689 xfs_extnum_t ext_cnt
; /* new extents still needed */
3690 xfs_extnum_t nex2
; /* extents after idx + count */
3691 xfs_bmbt_rec_t
*nex2_ep
= NULL
; /* temp list for nex2 extents */
3692 int nlists
; /* number of irec's (lists) */
3694 ASSERT(ifp
->if_flags
& XFS_IFEXTIREC
);
3695 erp
= &ifp
->if_u1
.if_ext_irec
[erp_idx
];
3696 nex2
= erp
->er_extcount
- idx
;
3697 nlists
= ifp
->if_real_bytes
/ XFS_IEXT_BUFSZ
;
3700 * Save second part of target extent list
3701 * (all extents past */
3703 byte_diff
= nex2
* sizeof(xfs_bmbt_rec_t
);
3704 nex2_ep
= (xfs_bmbt_rec_t
*) kmem_alloc(byte_diff
, KM_NOFS
);
3705 memmove(nex2_ep
, &erp
->er_extbuf
[idx
], byte_diff
);
3706 erp
->er_extcount
-= nex2
;
3707 xfs_iext_irec_update_extoffs(ifp
, erp_idx
+ 1, -nex2
);
3708 memset(&erp
->er_extbuf
[idx
], 0, byte_diff
);
3712 * Add the new extents to the end of the target
3713 * list, then allocate new irec record(s) and
3714 * extent buffer(s) as needed to store the rest
3715 * of the new extents.
3718 ext_diff
= MIN(ext_cnt
, (int)XFS_LINEAR_EXTS
- erp
->er_extcount
);
3720 erp
->er_extcount
+= ext_diff
;
3721 xfs_iext_irec_update_extoffs(ifp
, erp_idx
+ 1, ext_diff
);
3722 ext_cnt
-= ext_diff
;
3726 erp
= xfs_iext_irec_new(ifp
, erp_idx
);
3727 ext_diff
= MIN(ext_cnt
, (int)XFS_LINEAR_EXTS
);
3728 erp
->er_extcount
= ext_diff
;
3729 xfs_iext_irec_update_extoffs(ifp
, erp_idx
+ 1, ext_diff
);
3730 ext_cnt
-= ext_diff
;
3733 /* Add nex2 extents back to indirection array */
3735 xfs_extnum_t ext_avail
;
3738 byte_diff
= nex2
* sizeof(xfs_bmbt_rec_t
);
3739 ext_avail
= XFS_LINEAR_EXTS
- erp
->er_extcount
;
3742 * If nex2 extents fit in the current page, append
3743 * nex2_ep after the new extents.
3745 if (nex2
<= ext_avail
) {
3746 i
= erp
->er_extcount
;
3749 * Otherwise, check if space is available in the
3752 else if ((erp_idx
< nlists
- 1) &&
3753 (nex2
<= (ext_avail
= XFS_LINEAR_EXTS
-
3754 ifp
->if_u1
.if_ext_irec
[erp_idx
+1].er_extcount
))) {
3757 /* Create a hole for nex2 extents */
3758 memmove(&erp
->er_extbuf
[nex2
], erp
->er_extbuf
,
3759 erp
->er_extcount
* sizeof(xfs_bmbt_rec_t
));
3762 * Final choice, create a new extent page for
3767 erp
= xfs_iext_irec_new(ifp
, erp_idx
);
3769 memmove(&erp
->er_extbuf
[i
], nex2_ep
, byte_diff
);
3771 erp
->er_extcount
+= nex2
;
3772 xfs_iext_irec_update_extoffs(ifp
, erp_idx
+ 1, nex2
);
/*
 * This is called when the amount of space required for incore file
 * extents needs to be decreased.  The ext_diff parameter stores the
 * number of extents to be removed and the idx parameter contains
 * the extent index where the extents will be removed from.
 *
 * If the amount of space needed has decreased below the linear
 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
 * extent array.  Otherwise, use kmem_realloc() to adjust the
 * size to what is needed.
 */
void
xfs_iext_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(ext_diff > 0);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_iext_remove_indirect(ifp, idx, ext_diff);
	} else if (ifp->if_real_bytes) {
		xfs_iext_remove_direct(ifp, idx, ext_diff);
	} else {
		xfs_iext_remove_inline(ifp, idx, ext_diff);
	}
	ifp->if_bytes = new_size;
}
/*
 * This removes ext_diff extents from the inline buffer, beginning
 * at extent index idx.
 */
STATIC void
xfs_iext_remove_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	int		nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	ASSERT(idx < XFS_INLINE_EXTS);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(((nextents - ext_diff) > 0) &&
		(nextents - ext_diff) < XFS_INLINE_EXTS);

	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u2.if_inline_ext[idx],
			&ifp->if_u2.if_inline_ext[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			 sizeof(xfs_bmbt_rec_t));
		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
			0, ext_diff * sizeof(xfs_bmbt_rec_t));
	} else {
		memset(&ifp->if_u2.if_inline_ext[idx], 0,
			ext_diff * sizeof(xfs_bmbt_rec_t));
	}
}
/*
 * This removes ext_diff extents from a linear (direct) extent list,
 * beginning at extent index idx.  If the extents are being removed
 * from the end of the list (ie. truncate) then we just need to re-
 * allocate the list to remove the extra space.  Otherwise, if the
 * extents are being removed from the middle of the existing extent
 * entries, then we first need to move the extent records beginning
 * at idx + ext_diff up in the list to overwrite the records being
 * removed, then remove the extra space via kmem_realloc.
 */
STATIC void
xfs_iext_remove_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	new_size = ifp->if_bytes -
		(ext_diff * sizeof(xfs_bmbt_rec_t));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
		return;
	}
	/* Move extents up in the list (if needed) */
	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u1.if_extents[idx],
			&ifp->if_u1.if_extents[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			 sizeof(xfs_bmbt_rec_t));
	}
	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
		0, ext_diff * sizeof(xfs_bmbt_rec_t));
	/*
	 * Reallocate the direct extent list.  If the extents
	 * will fit inside the inode then xfs_iext_realloc_direct
	 * will switch from direct to inline extent allocation
	 * mode for us.
	 */
	xfs_iext_realloc_direct(ifp, new_size);
	ifp->if_bytes = new_size;
}
3891 * This is called when incore extents are being removed from the
3892 * indirection array and the extents being removed span multiple extent
3893 * buffers. The idx parameter contains the file extent index where we
3894 * want to begin removing extents, and the count parameter contains
3895 * how many extents need to be removed.
3897 * |-------| |-------|
3898 * | nex1 | | | nex1 - number of extents before idx
3899 * |-------| | count |
3900 * | | | | count - number of extents being removed at idx
3901 * | count | |-------|
3902 * | | | nex2 | nex2 - number of extents after idx + count
3903 * |-------| |-------|
3906 xfs_iext_remove_indirect(
3907 xfs_ifork_t
*ifp
, /* inode fork pointer */
3908 xfs_extnum_t idx
, /* index to begin removing extents */
3909 int count
) /* number of extents to remove */
3911 xfs_ext_irec_t
*erp
; /* indirection array pointer */
3912 int erp_idx
= 0; /* indirection array index */
3913 xfs_extnum_t ext_cnt
; /* extents left to remove */
3914 xfs_extnum_t ext_diff
; /* extents to remove in current list */
3915 xfs_extnum_t nex1
; /* number of extents before idx */
3916 xfs_extnum_t nex2
; /* extents after idx + count */
3917 int nlists
; /* entries in indirection array */
3918 int page_idx
= idx
; /* index in target extent list */
3920 ASSERT(ifp
->if_flags
& XFS_IFEXTIREC
);
3921 erp
= xfs_iext_idx_to_irec(ifp
, &page_idx
, &erp_idx
, 0);
3922 ASSERT(erp
!= NULL
);
3923 nlists
= ifp
->if_real_bytes
/ XFS_IEXT_BUFSZ
;
3927 nex2
= MAX((erp
->er_extcount
- (nex1
+ ext_cnt
)), 0);
3928 ext_diff
= MIN(ext_cnt
, (erp
->er_extcount
- nex1
));
3930 * Check for deletion of entire list;
3931 * xfs_iext_irec_remove() updates extent offsets.
3933 if (ext_diff
== erp
->er_extcount
) {
3934 xfs_iext_irec_remove(ifp
, erp_idx
);
3935 ext_cnt
-= ext_diff
;
3938 ASSERT(erp_idx
< ifp
->if_real_bytes
/
3940 erp
= &ifp
->if_u1
.if_ext_irec
[erp_idx
];
3947 /* Move extents up (if needed) */
3949 memmove(&erp
->er_extbuf
[nex1
],
3950 &erp
->er_extbuf
[nex1
+ ext_diff
],
3951 nex2
* sizeof(xfs_bmbt_rec_t
));
3953 /* Zero out rest of page */
3954 memset(&erp
->er_extbuf
[nex1
+ nex2
], 0, (XFS_IEXT_BUFSZ
-
3955 ((nex1
+ nex2
) * sizeof(xfs_bmbt_rec_t
))));
3956 /* Update remaining counters */
3957 erp
->er_extcount
-= ext_diff
;
3958 xfs_iext_irec_update_extoffs(ifp
, erp_idx
+ 1, -ext_diff
);
3959 ext_cnt
-= ext_diff
;
3964 ifp
->if_bytes
-= count
* sizeof(xfs_bmbt_rec_t
);
3965 xfs_iext_irec_compact(ifp
);
/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		if (!is_power_of_2(new_size)){
			rnew_size = roundup_pow_of_two(new_size);
		}
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents =
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes, KM_NOFS);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list.  Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		if (!is_power_of_2(new_size)) {
			rnew_size = roundup_pow_of_two(new_size);
		}
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}
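
/*
 * While the list lives in a heap buffer its allocated size is kept at a
 * power of two (roundup_pow_of_two() above), presumably so that repeated
 * small grows and shrinks do not force a reallocation every time;
 * if_real_bytes records the allocated size while if_bytes is the portion
 * actually in use.
 */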
/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents);
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}
/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here.  It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* number of extents in file */
{
	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}
/*
 * Resize an extent indirection array to new_size bytes.
 */
void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_NOFS);
	}
}
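
/*
 * Note that this resizes only the array of xfs_ext_irec_t headers; each
 * extent page (er_extbuf) stays a fixed XFS_IEXT_BUFSZ allocation, which
 * is why nlists above is derived from if_real_bytes / XFS_IEXT_BUFSZ.
 */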
/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
void
xfs_iext_indirect_to_direct(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_full(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec);
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}
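
/*
 * Note on xfs_iext_indirect_to_direct(): the preceding full compaction
 * leaves every live extent record in the first er_extbuf, so that
 * buffer can simply be adopted as the linear list.  With the usual
 * one-page XFS_IEXT_BUFSZ and 16-byte extent records, XFS_LINEAR_EXTS
 * works out to 256, so this path is only taken once the fork holds
 * 256 or fewer incore extents (see xfs_iext_irec_compact() below).
 */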
/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}
/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
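
/*
 * Usage sketch for xfs_iext_bno_to_ext() (illustrative only):
 *
 *	xfs_extnum_t		idx;
 *	xfs_bmbt_rec_host_t	*ep;
 *
 *	ep = xfs_iext_bno_to_ext(ifp, bno, &idx);
 *
 * A NULL return means the fork has no incore extents or bno lies
 * beyond the last one; otherwise ep and idx identify the extent
 * containing bno, or the first extent starting beyond it.
 */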
/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}
/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0 && page_idx <=
		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		    realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return erp;
}
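
/*
 * Index conversion example for xfs_iext_idx_to_irec() (illustrative,
 * assuming 256 records per er_extbuf and fully populated buffers):
 * file extent index 700 falls in the third buffer, so *erp_idxp would
 * be set to 2 and *idxp converted from 700 to the page index 188
 * (700 minus an er_extoff of 512).  In general buffers need not be
 * full, which is why the binary search works off er_extoff and
 * er_extcount rather than fixed arithmetic.
 */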
/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;
}
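
/*
 * Note on xfs_iext_irec_init(): the first indirection entry simply
 * adopts the existing direct buffer (grown to XFS_IEXT_BUFSZ first if
 * needed), so no extent records are copied.  The switch happens once
 * the incore extent list would no longer fit in a single
 * XFS_IEXT_BUFSZ allocation.
 */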
/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return &erp[erp_idx];
}
/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array. A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
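
/*
 * Bookkeeping example for xfs_iext_irec_remove() (illustrative): if
 * the irec being removed still holds 40 extent records, er_extoff of
 * every later irec is shifted down by 40 before the entry is dropped.
 * Callers that have already emptied the buffer therefore free
 * er_extbuf and NULL it out first, so the offsets are left alone (see
 * the compaction helpers below).
 */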
/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array. Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible. The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 *    Full Compaction: Extents occupy less than 10% of allocated space
 * Partial Compaction: Extents occupy > 10% and < 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
		xfs_iext_irec_compact_full(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
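
/*
 * Worked example of the compaction policy (illustrative, assuming 256
 * records per er_extbuf): with nlists == 4 there is room for 1024
 * records, so fewer than 128 live extents (the >> 3 case) triggers a
 * full compaction, fewer than 512 (the >> 1 case) only merges
 * neighboring pages, and anything denser is left alone.  Dropping to
 * 256 or fewer extents abandons the indirection array entirely.
 */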
/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memmove(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
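
/*
 * Merge example for xfs_iext_irec_compact_pages() (illustrative,
 * assuming 256 records per er_extbuf): a page holding 100 records
 * followed by one holding 120 is merged, since 120 fits in the 156
 * free slots; a page holding 200 followed by one holding 100 is left
 * alone, because this pass never splits a page's records across two
 * buffers.
 */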
/*
 * Fully compact the extent records managed by the indirection array.
 */
void
xfs_iext_irec_compact_full(
	xfs_ifork_t	*ifp)			/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep, *ep_next;	/* extent record pointers */
	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
	int		erp_idx = 0;		/* extent irec index */
	int		ext_avail;		/* empty entries in ex list */
	int		ext_diff;		/* number of exts to add */
	int		nlists;			/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);

	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = ifp->if_u1.if_ext_irec;
	ep = &erp->er_extbuf[erp->er_extcount];
	erp_next = erp + 1;
	ep_next = erp_next->er_extbuf;

	while (erp_idx < nlists - 1) {
		/*
		 * Check how many extent records are available in this irec.
		 * If there is none skip the whole exercise.
		 */
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		if (ext_avail) {
			/*
			 * Copy over as many as possible extent records into
			 * the previous page.
			 */
			ext_diff = MIN(ext_avail, erp_next->er_extcount);
			memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += ext_diff;
			erp_next->er_extcount -= ext_diff;

			/*
			 * If the next irec is empty now we can simply
			 * remove it.
			 */
			if (erp_next->er_extcount == 0) {
				/*
				 * Free page before removing extent record
				 * so er_extoffs don't get modified in
				 * xfs_iext_irec_remove.
				 */
				kmem_free(erp_next->er_extbuf);
				erp_next->er_extbuf = NULL;
				xfs_iext_irec_remove(ifp, erp_idx + 1);
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
				nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

			/*
			 * If the next irec is not empty move up the content
			 * that has not been copied to the previous page to
			 * the beginning of this one.
			 */
			} else {
				memmove(erp_next->er_extbuf, &ep_next[ext_diff],
					erp_next->er_extcount *
					sizeof(xfs_bmbt_rec_t));
				ep_next = erp_next->er_extbuf;
				memset(&ep_next[erp_next->er_extcount], 0,
					(XFS_LINEAR_EXTS -
						erp_next->er_extcount) *
					sizeof(xfs_bmbt_rec_t));
			}
		}

		if (erp->er_extcount == XFS_LINEAR_EXTS) {
			erp_idx++;
			if (erp_idx < nlists)
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
			else
				break;
		}
		ep = &erp->er_extbuf[erp->er_extcount];
		erp_next = erp + 1;
		ep_next = erp_next->er_extbuf;
	}
}
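
/*
 * Unlike xfs_iext_irec_compact_pages(), a full compaction copies
 * records forward across page boundaries, so every er_extbuf except
 * the last ends up completely full; e.g. pages holding 100/100/100
 * records become 256/44 (illustrative, assuming 256 records per
 * buffer).  xfs_iext_indirect_to_direct() relies on this when it
 * asserts that only a single buffer remains afterwards.
 */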
/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}