2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_bmap_btree.h"
28 #include "xfs_ialloc_btree.h"
29 #include "xfs_dinode.h"
30 #include "xfs_inode.h"
31 #include "xfs_error.h"
32 #include "xfs_cksum.h"
33 #include "xfs_icache.h"
34 #include "xfs_ialloc.h"
35 #include "xfs_trans.h"
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int		i;
	int		j;
	xfs_dinode_t	*dip;

	/* number of inodes per cluster buffer */
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				i, (long long)bp->b_bn);
		}
	}
}
#endif
66 * If we are doing readahead on an inode buffer, we might be in log recovery
67 * reading an inode allocation buffer that hasn't yet been replayed, and hence
68 * has not had the inode cores stamped into it. Hence for readahead, the buffer
69 * may be potentially invalid.
71 * If the readahead buffer is invalid, we don't want to mark it with an error,
72 * but we do want to clear the DONE status of the buffer so that a followup read
73 * will re-read it from disk. This will ensure that we don't get an unnecessary
74 * warnings during log recovery and we don't get unnecssary panics on debug
82 struct xfs_mount
*mp
= bp
->b_target
->bt_mount
;
87 * Validate the magic number and version of every inode in the buffer
89 ni
= XFS_BB_TO_FSB(mp
, bp
->b_length
) * mp
->m_sb
.sb_inopblock
;
90 for (i
= 0; i
< ni
; i
++) {
94 dip
= (struct xfs_dinode
*)xfs_buf_offset(bp
,
95 (i
<< mp
->m_sb
.sb_inodelog
));
96 di_ok
= dip
->di_magic
== cpu_to_be16(XFS_DINODE_MAGIC
) &&
97 XFS_DINODE_GOOD_VERSION(dip
->di_version
);
98 if (unlikely(XFS_TEST_ERROR(!di_ok
, mp
,
99 XFS_ERRTAG_ITOBP_INOTOBP
,
100 XFS_RANDOM_ITOBP_INOTOBP
))) {
102 bp
->b_flags
&= ~XBF_DONE
;
106 xfs_buf_ioerror(bp
, EFSCORRUPTED
);
107 XFS_CORRUPTION_ERROR(__func__
, XFS_ERRLEVEL_HIGH
,
111 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
112 (unsigned long long)bp
->b_bn
, i
,
113 be16_to_cpu(dip
->di_magic
));
117 xfs_inobp_check(mp
, bp
);
122 xfs_inode_buf_read_verify(
125 xfs_inode_buf_verify(bp
, false);
129 xfs_inode_buf_readahead_verify(
132 xfs_inode_buf_verify(bp
, true);
136 xfs_inode_buf_write_verify(
139 xfs_inode_buf_verify(bp
, false);
142 const struct xfs_buf_ops xfs_inode_buf_ops
= {
143 .verify_read
= xfs_inode_buf_read_verify
,
144 .verify_write
= xfs_inode_buf_write_verify
,
147 const struct xfs_buf_ops xfs_inode_buf_ra_ops
= {
148 .verify_read
= xfs_inode_buf_readahead_verify
,
149 .verify_write
= xfs_inode_buf_write_verify
,
154 * This routine is called to map an inode to the buffer containing the on-disk
155 * version of the inode. It returns a pointer to the buffer containing the
156 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
157 * pointer to the on-disk inode within that buffer.
159 * If a non-zero error is returned, then the contents of bpp and dipp are
164 struct xfs_mount
*mp
,
165 struct xfs_trans
*tp
,
166 struct xfs_imap
*imap
,
167 struct xfs_dinode
**dipp
,
168 struct xfs_buf
**bpp
,
175 buf_flags
|= XBF_UNMAPPED
;
176 error
= xfs_trans_read_buf(mp
, tp
, mp
->m_ddev_targp
, imap
->im_blkno
,
177 (int)imap
->im_len
, buf_flags
, &bp
,
180 if (error
== EAGAIN
) {
181 ASSERT(buf_flags
& XBF_TRYLOCK
);
185 if (error
== EFSCORRUPTED
&&
186 (iget_flags
& XFS_IGET_UNTRUSTED
))
187 return XFS_ERROR(EINVAL
);
189 xfs_warn(mp
, "%s: xfs_trans_read_buf() returned error %d.",
195 *dipp
= (struct xfs_dinode
*)xfs_buf_offset(bp
, imap
->im_boffset
);
200 xfs_dinode_from_disk(
204 to
->di_magic
= be16_to_cpu(from
->di_magic
);
205 to
->di_mode
= be16_to_cpu(from
->di_mode
);
206 to
->di_version
= from
->di_version
;
207 to
->di_format
= from
->di_format
;
208 to
->di_onlink
= be16_to_cpu(from
->di_onlink
);
209 to
->di_uid
= be32_to_cpu(from
->di_uid
);
210 to
->di_gid
= be32_to_cpu(from
->di_gid
);
211 to
->di_nlink
= be32_to_cpu(from
->di_nlink
);
212 to
->di_projid_lo
= be16_to_cpu(from
->di_projid_lo
);
213 to
->di_projid_hi
= be16_to_cpu(from
->di_projid_hi
);
214 memcpy(to
->di_pad
, from
->di_pad
, sizeof(to
->di_pad
));
215 to
->di_flushiter
= be16_to_cpu(from
->di_flushiter
);
216 to
->di_atime
.t_sec
= be32_to_cpu(from
->di_atime
.t_sec
);
217 to
->di_atime
.t_nsec
= be32_to_cpu(from
->di_atime
.t_nsec
);
218 to
->di_mtime
.t_sec
= be32_to_cpu(from
->di_mtime
.t_sec
);
219 to
->di_mtime
.t_nsec
= be32_to_cpu(from
->di_mtime
.t_nsec
);
220 to
->di_ctime
.t_sec
= be32_to_cpu(from
->di_ctime
.t_sec
);
221 to
->di_ctime
.t_nsec
= be32_to_cpu(from
->di_ctime
.t_nsec
);
222 to
->di_size
= be64_to_cpu(from
->di_size
);
223 to
->di_nblocks
= be64_to_cpu(from
->di_nblocks
);
224 to
->di_extsize
= be32_to_cpu(from
->di_extsize
);
225 to
->di_nextents
= be32_to_cpu(from
->di_nextents
);
226 to
->di_anextents
= be16_to_cpu(from
->di_anextents
);
227 to
->di_forkoff
= from
->di_forkoff
;
228 to
->di_aformat
= from
->di_aformat
;
229 to
->di_dmevmask
= be32_to_cpu(from
->di_dmevmask
);
230 to
->di_dmstate
= be16_to_cpu(from
->di_dmstate
);
231 to
->di_flags
= be16_to_cpu(from
->di_flags
);
232 to
->di_gen
= be32_to_cpu(from
->di_gen
);
234 if (to
->di_version
== 3) {
235 to
->di_changecount
= be64_to_cpu(from
->di_changecount
);
236 to
->di_crtime
.t_sec
= be32_to_cpu(from
->di_crtime
.t_sec
);
237 to
->di_crtime
.t_nsec
= be32_to_cpu(from
->di_crtime
.t_nsec
);
238 to
->di_flags2
= be64_to_cpu(from
->di_flags2
);
239 to
->di_ino
= be64_to_cpu(from
->di_ino
);
240 to
->di_lsn
= be64_to_cpu(from
->di_lsn
);
241 memcpy(to
->di_pad2
, from
->di_pad2
, sizeof(to
->di_pad2
));
242 uuid_copy(&to
->di_uuid
, &from
->di_uuid
);
249 xfs_icdinode_t
*from
)
251 to
->di_magic
= cpu_to_be16(from
->di_magic
);
252 to
->di_mode
= cpu_to_be16(from
->di_mode
);
253 to
->di_version
= from
->di_version
;
254 to
->di_format
= from
->di_format
;
255 to
->di_onlink
= cpu_to_be16(from
->di_onlink
);
256 to
->di_uid
= cpu_to_be32(from
->di_uid
);
257 to
->di_gid
= cpu_to_be32(from
->di_gid
);
258 to
->di_nlink
= cpu_to_be32(from
->di_nlink
);
259 to
->di_projid_lo
= cpu_to_be16(from
->di_projid_lo
);
260 to
->di_projid_hi
= cpu_to_be16(from
->di_projid_hi
);
261 memcpy(to
->di_pad
, from
->di_pad
, sizeof(to
->di_pad
));
262 to
->di_atime
.t_sec
= cpu_to_be32(from
->di_atime
.t_sec
);
263 to
->di_atime
.t_nsec
= cpu_to_be32(from
->di_atime
.t_nsec
);
264 to
->di_mtime
.t_sec
= cpu_to_be32(from
->di_mtime
.t_sec
);
265 to
->di_mtime
.t_nsec
= cpu_to_be32(from
->di_mtime
.t_nsec
);
266 to
->di_ctime
.t_sec
= cpu_to_be32(from
->di_ctime
.t_sec
);
267 to
->di_ctime
.t_nsec
= cpu_to_be32(from
->di_ctime
.t_nsec
);
268 to
->di_size
= cpu_to_be64(from
->di_size
);
269 to
->di_nblocks
= cpu_to_be64(from
->di_nblocks
);
270 to
->di_extsize
= cpu_to_be32(from
->di_extsize
);
271 to
->di_nextents
= cpu_to_be32(from
->di_nextents
);
272 to
->di_anextents
= cpu_to_be16(from
->di_anextents
);
273 to
->di_forkoff
= from
->di_forkoff
;
274 to
->di_aformat
= from
->di_aformat
;
275 to
->di_dmevmask
= cpu_to_be32(from
->di_dmevmask
);
276 to
->di_dmstate
= cpu_to_be16(from
->di_dmstate
);
277 to
->di_flags
= cpu_to_be16(from
->di_flags
);
278 to
->di_gen
= cpu_to_be32(from
->di_gen
);
280 if (from
->di_version
== 3) {
281 to
->di_changecount
= cpu_to_be64(from
->di_changecount
);
282 to
->di_crtime
.t_sec
= cpu_to_be32(from
->di_crtime
.t_sec
);
283 to
->di_crtime
.t_nsec
= cpu_to_be32(from
->di_crtime
.t_nsec
);
284 to
->di_flags2
= cpu_to_be64(from
->di_flags2
);
285 to
->di_ino
= cpu_to_be64(from
->di_ino
);
286 to
->di_lsn
= cpu_to_be64(from
->di_lsn
);
287 memcpy(to
->di_pad2
, from
->di_pad2
, sizeof(to
->di_pad2
));
288 uuid_copy(&to
->di_uuid
, &from
->di_uuid
);
289 to
->di_flushiter
= 0;
291 to
->di_flushiter
= cpu_to_be16(from
->di_flushiter
);
297 struct xfs_mount
*mp
,
298 struct xfs_inode
*ip
,
299 struct xfs_dinode
*dip
)
301 if (dip
->di_magic
!= cpu_to_be16(XFS_DINODE_MAGIC
))
304 /* only version 3 or greater inodes are extensively verified here */
305 if (dip
->di_version
< 3)
308 if (!xfs_sb_version_hascrc(&mp
->m_sb
))
310 if (!xfs_verify_cksum((char *)dip
, mp
->m_sb
.sb_inodesize
,
311 offsetof(struct xfs_dinode
, di_crc
)))
313 if (be64_to_cpu(dip
->di_ino
) != ip
->i_ino
)
315 if (!uuid_equal(&dip
->di_uuid
, &mp
->m_sb
.sb_uuid
))
322 struct xfs_mount
*mp
,
323 struct xfs_dinode
*dip
)
327 if (dip
->di_version
< 3)
330 ASSERT(xfs_sb_version_hascrc(&mp
->m_sb
));
331 crc
= xfs_start_cksum((char *)dip
, mp
->m_sb
.sb_inodesize
,
332 offsetof(struct xfs_dinode
, di_crc
));
333 dip
->di_crc
= xfs_end_cksum(crc
);
337 * Read the disk inode attributes into the in-core inode structure.
339 * For version 5 superblocks, if we are initialising a new inode and we are not
340 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
341 * inode core with a random generation number. If we are keeping inodes around,
342 * we need to read the inode cluster to get the existing generation number off
343 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
344 * format) then log recovery is dependent on the di_flushiter field being
345 * initialised from the current on-disk value and hence we must also read the
360 * Fill in the location information in the in-core inode.
362 error
= xfs_imap(mp
, tp
, ip
->i_ino
, &ip
->i_imap
, iget_flags
);
366 /* shortcut IO on inode allocation if possible */
367 if ((iget_flags
& XFS_IGET_CREATE
) &&
368 xfs_sb_version_hascrc(&mp
->m_sb
) &&
369 !(mp
->m_flags
& XFS_MOUNT_IKEEP
)) {
370 /* initialise the on-disk inode core */
371 memset(&ip
->i_d
, 0, sizeof(ip
->i_d
));
372 ip
->i_d
.di_magic
= XFS_DINODE_MAGIC
;
373 ip
->i_d
.di_gen
= prandom_u32();
374 if (xfs_sb_version_hascrc(&mp
->m_sb
)) {
375 ip
->i_d
.di_version
= 3;
376 ip
->i_d
.di_ino
= ip
->i_ino
;
377 uuid_copy(&ip
->i_d
.di_uuid
, &mp
->m_sb
.sb_uuid
);
379 ip
->i_d
.di_version
= 2;
384 * Get pointers to the on-disk inode and the buffer containing it.
386 error
= xfs_imap_to_bp(mp
, tp
, &ip
->i_imap
, &dip
, &bp
, 0, iget_flags
);
390 /* even unallocated inodes are verified */
391 if (!xfs_dinode_verify(mp
, ip
, dip
)) {
392 xfs_alert(mp
, "%s: validation failed for inode %lld failed",
393 __func__
, ip
->i_ino
);
395 XFS_CORRUPTION_ERROR(__func__
, XFS_ERRLEVEL_LOW
, mp
, dip
);
396 error
= XFS_ERROR(EFSCORRUPTED
);
401 * If the on-disk inode is already linked to a directory
402 * entry, copy all of the inode into the in-core inode.
403 * xfs_iformat_fork() handles copying in the inode format
404 * specific information.
405 * Otherwise, just get the truly permanent information.
408 xfs_dinode_from_disk(&ip
->i_d
, dip
);
409 error
= xfs_iformat_fork(ip
, dip
);
412 xfs_alert(mp
, "%s: xfs_iformat() returned error %d",
419 * Partial initialisation of the in-core inode. Just the bits
420 * that xfs_ialloc won't overwrite or relies on being correct.
422 ip
->i_d
.di_magic
= be16_to_cpu(dip
->di_magic
);
423 ip
->i_d
.di_version
= dip
->di_version
;
424 ip
->i_d
.di_gen
= be32_to_cpu(dip
->di_gen
);
425 ip
->i_d
.di_flushiter
= be16_to_cpu(dip
->di_flushiter
);
427 if (dip
->di_version
== 3) {
428 ip
->i_d
.di_ino
= be64_to_cpu(dip
->di_ino
);
429 uuid_copy(&ip
->i_d
.di_uuid
, &dip
->di_uuid
);
433 * Make sure to pull in the mode here as well in
434 * case the inode is released without being used.
435 * This ensures that xfs_inactive() will see that
436 * the inode is already free and not try to mess
437 * with the uninitialized part of it.
443 * The inode format changed when we moved the link count and
444 * made it 32 bits long. If this is an old format inode,
445 * convert it in memory to look like a new one. If it gets
446 * flushed to disk we will convert back before flushing or
447 * logging it. We zero out the new projid field and the old link
448 * count field. We'll handle clearing the pad field (the remains
449 * of the old uuid field) when we actually convert the inode to
450 * the new format. We don't change the version number so that we
451 * can distinguish this from a real new format inode.
453 if (ip
->i_d
.di_version
== 1) {
454 ip
->i_d
.di_nlink
= ip
->i_d
.di_onlink
;
455 ip
->i_d
.di_onlink
= 0;
456 xfs_set_projid(ip
, 0);
459 ip
->i_delayed_blks
= 0;
462 * Mark the buffer containing the inode as something to keep
463 * around for a while. This helps to keep recently accessed
464 * meta-data in-core longer.
466 xfs_buf_set_ref(bp
, XFS_INO_REF
);
469 * Use xfs_trans_brelse() to release the buffer containing the on-disk
470 * inode, because it was acquired with xfs_trans_read_buf() in
471 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
472 * brelse(). If we're within a transaction, then xfs_trans_brelse()
473 * will only release the buffer if it is not dirty within the
474 * transaction. It will be OK to release the buffer in this case,
475 * because inodes on disk are never destroyed and we will be locking the
476 * new in-core inode before putting it in the cache where other
477 * processes can find it. Thus we don't have to worry about the inode
478 * being changed just because we released the buffer.
481 xfs_trans_brelse(tp
, bp
);