/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
/*
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_root,
			      xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
			      xfs_da_state_blk_t *existing_blk,
			      xfs_da_state_blk_t *split_blk,
			      xfs_da_state_blk_t *blk_to_add,
			      int treelevel,
			      int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *node_blk_1,
				   xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
			     xfs_da_state_blk_t *old_node_blk,
			     xfs_da_state_blk_t *new_node_blk);
/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
			     xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
				xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
				   xfs_da_state_blk_t *src_node_blk,
				   xfs_da_state_blk_t *dst_node_blk);

STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
			      xfs_da_state_blk_t *drop_blk,
			      xfs_da_state_blk_t *save_blk);
kmem_zone_t *xfs_da_state_zone;		/* anchor for state struct zone */
/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
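
/*
 * Illustrative note (not part of the original source): callers pair this
 * with xfs_da_state_free() below, roughly as in this sketch.  The exact
 * fields initialized between the two calls are an assumption made here for
 * illustration only:
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	... lookup / split / join using the state ...
 *	xfs_da_state_free(state);
 */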

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
	memset((char *)state, 0, sizeof(*state));
	kmem_zone_free(xfs_da_state_zone, state);
}

static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;
	const struct xfs_dir_ops *ops;

	ops = xfs_dir_get_ops(mp, NULL);

	ops->node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_geo->node_ents &&
	    ichdr.count > mp->m_attr_geo->node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

static void
xfs_da3_node_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr	*hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		xfs_verifier_error(bp);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
}

/*
 * leaf/node format detection on trees is sketchy, so a node read can be done
 * on leaf level blocks when detection identifies the tree as a node format
 * tree incorrectly. In this case, we need to swap the verifier to match the
 * correct format of the block being read.
 */
static void
xfs_da3_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA3_NODE_MAGIC:
		if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
			xfs_buf_ioerror(bp, -EFSBADCRC);
			break;
		}
		/* fall through */
	case XFS_DA_NODE_MAGIC:
		if (!xfs_da3_node_verify(bp)) {
			xfs_buf_ioerror(bp, -EFSCORRUPTED);
			break;
		}
		return;
	case XFS_ATTR_LEAF_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		xfs_buf_ioerror(bp, -EFSCORRUPTED);
		break;
	}

	/* corrupt block */
	xfs_verifier_error(bp);
}

const struct xfs_buf_ops xfs_da3_node_buf_ops = {
	.verify_read = xfs_da3_node_read_verify,
	.verify_write = xfs_da3_node_write_verify,
};

int
xfs_da3_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	int			err;

	err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
				which_fork, &xfs_da3_node_buf_ops);
	if (!err && tp) {
		struct xfs_da_blkinfo	*info = (*bpp)->b_addr;
		int			type;

		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;
	struct xfs_inode	*dp = args->dp;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
	if (error)
		return error;
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	dp->d_ops->node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	*bpp = bp;
	return 0;
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != -ENOSPC)) {
				return error;	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return error;	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk,
						   addblk, max - i, &action);
			addblk->bp = NULL;
			if (error)
				return error;	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return 0;

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error)
		return error;	/* GROT: dir is inconsistent */

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return 0;
}
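
/*
 * Note (explanatory, not from the original source): xfs_da3_split() works
 * bottom-up. A full leaf is split first; if the parent node then has no room
 * for the extra pointer, that node is split as well, and so on up the path.
 * Only if the walk reaches the top with a block still left to add does
 * xfs_da3_root_split() run, which is what grows the tree by one level.
 */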

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr icnodehdr;

		dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
		btree = dp->d_ops->node_tree_p(oldroot);
		size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
		level = icnodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
		ents = dp->d_ops->leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);

	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= args->geo->leafblk &&
		       blk1->blkno < args->geo->freeblk);
		ASSERT(blk2->blkno >= args->geo->leafblk &&
		       blk2->blkno < args->geo->freeblk);
	}

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}
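
/*
 * Note (explanatory, not from the original source): the root of a da-btree
 * lives at a fixed offset in the directory/attribute fork (block 0, or
 * geo->leafblk for directories), so the root cannot simply move. Instead the
 * old root's contents are copied out to a freshly grown block and the
 * fixed-address block is rewritten as a new, one-level-higher node pointing
 * at the two halves of the split.
 */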

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->args->geo->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return error;	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp, state->args->whichfork);
		if (error)
			return error;	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return error;
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return 0;
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
	dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));

	dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				dp->d_ops->node_hdr_size +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
		dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
		btree1 = dp->d_ops->node_tree_p(node1);
		btree2 = dp->d_ops->node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}
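
/*
 * Worked example (explanatory, not from the original source): with 96
 * entries in node1 and 0 in node2, count = (96 - 0) / 2 = 48, so the upper
 * 48 entries of node1 are copied into node2 and both headers end up showing
 * 48 entries. A negative count simply means the entries move the other way,
 * from low in node2 to high in node1.
 */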

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->args->geo->leafblk &&
		       newblk->blkno < state->args->geo->freeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return error;
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return error;
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return error;
}

#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = dp->d_ops->node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
				  args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;

		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0,
			  args->geo->blksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return error;
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
		if (error)
			return error;
		if (retval)
			*action = 0;
		else
			*action = 2;
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count  = state->args->geo->node_ents;
	count -= state->args->geo->node_ents >> 2;
	count -= nodehdr.count;

	/* start with smaller blk num */
	forward = nodehdr.forw < nodehdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_da3_icnode_hdr thdr;

		if (forward)
			blkno = nodehdr.forw;
		else
			blkno = nodehdr.back;
		if (blkno == 0)
			continue;
		error = xfs_da3_node_read(state->args->trans, dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return error;

		node = bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&thdr, node);
		xfs_trans_brelse(state->args->trans, bp);

		if (count - thdr.count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
					   0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
					   0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
		return 0;
	}
	*action = 1;
	return 0;
}
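
/*
 * Worked example (explanatory, not from the original source): with
 * geo->node_ents = 128 the threshold above is count = 128 - 32 - ourcount.
 * If this node holds 40 entries, a sibling only qualifies if it holds at
 * most 56 entries, which leaves 128 - 40 - 56 = 32 entries (25%) of slack
 * after the merge.
 */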

/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da3_node_lasthash(
	struct xfs_inode	*dp,
	struct xfs_buf		*bp,
	int			*count)
{
	struct xfs_da_intnode	 *node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;

	node = bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	if (count)
		*count = nodehdr.count;
	if (!nodehdr.count)
		return 0;
	btree = dp->d_ops->node_tree_p(node);
	return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da3_fixhashpath(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	xfs_dahash_t		lasthash = 0;
	int			level;
	int			count;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		struct xfs_da3_icnode_hdr nodehdr;

		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);
		if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree[blk->index].hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, &btree[blk->index],
						  sizeof(*btree)));

		lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
	}
}

/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da3_node_remove(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			index;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&nodehdr, node);
	ASSERT(drop_blk->index < nodehdr.count);
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	index = drop_blk->index;
	btree = dp->d_ops->node_tree_p(node);
	if (index < nodehdr.count - 1) {
		tmp  = nodehdr.count - index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(&btree[index], &btree[index + 1], tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, &btree[index], tmp));
		index = nodehdr.count - 1;
	}
	memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
	nodehdr.count -= 1;
	dp->d_ops->node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}

/*
 * Unbalance the elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da3_node_unbalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_intnode	*drop_node;
	struct xfs_da_intnode	*save_node;
	struct xfs_da_node_entry *drop_btree;
	struct xfs_da_node_entry *save_btree;
	struct xfs_da3_icnode_hdr drop_hdr;
	struct xfs_da3_icnode_hdr save_hdr;
	struct xfs_trans	*tp;
	int			sindex;
	int			tmp;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
	dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
	drop_btree = dp->d_ops->node_tree_p(drop_node);
	save_btree = dp->d_ops->node_tree_p(save_node);
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_btree[0].hashval) <
			be32_to_cpu(save_btree[0].hashval)) ||
	    (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
			be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
		/* XXX: check this - is memmove dst correct? */
		tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
		memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);

		sindex = 0;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[0],
				(save_hdr.count + drop_hdr.count) *
						sizeof(xfs_da_node_entry_t)));
	} else {
		sindex = save_hdr.count;
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
				drop_hdr.count * sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(&save_btree[sindex], &drop_btree[0], tmp);
	save_hdr.count += drop_hdr.count;

	dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
				dp->d_ops->node_hdr_size));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da3_node_lookup_int(
	struct xfs_da_state	*state,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*curr;
	struct xfs_da_intnode	*node;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	xfs_dablk_t		blkno;
	xfs_dahash_t		hashval;
	xfs_dahash_t		btreehashval;
	int			probe;
	int			span;
	int			max;
	int			error;
	int			retval;
	struct xfs_inode	*dp = state->args->dp;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da3_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return error;
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);

		if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
		    blk->magic == XFS_ATTR3_LEAF_MAGIC) {
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		}

		if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		    blk->magic == XFS_DIR3_LEAFN_MAGIC) {
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		}

		blk->magic = XFS_DA_NODE_MAGIC;

		/*
		 * Search an intermediate node for a match.
		 */
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		max = nodehdr.count;
		blk->hashval = be32_to_cpu(btree[max - 1].hashval);

		/*
		 * Binary search.  (note: small blocks will skip loop)
		 */
		probe = span = max / 2;
		hashval = args->hashval;
		while (span > 4) {
			span /= 2;
			btreehashval = be32_to_cpu(btree[probe].hashval);
			if (btreehashval < hashval)
				probe += span;
			else if (btreehashval > hashval)
				probe -= span;
			else
				break;
		}
		ASSERT((probe >= 0) && (probe < max));
		ASSERT((span <= 4) ||
			(be32_to_cpu(btree[probe].hashval) == hashval));

		/*
		 * Since we may have duplicate hashval's, find the first
		 * matching hashval in the node.
		 */
		while (probe > 0 &&
		       be32_to_cpu(btree[probe].hashval) >= hashval) {
			probe--;
		}
		while (probe < max &&
		       be32_to_cpu(btree[probe].hashval) < hashval) {
			probe++;
		}

		/*
		 * Pick the right block to descend on.
		 */
		if (probe == max) {
			blk->index = max - 1;
			blkno = be32_to_cpu(btree[max - 1].before);
		} else {
			blk->index = probe;
			blkno = be32_to_cpu(btree[probe].before);
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return -EFSCORRUPTED;
		}
		if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da3_path_shift(state, &state->path, 1, 1,
						   &retval);
			if (error)
				return error;
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = -ENOATTR;
			}
		}
		break;
	}
	*result = retval;
	return 0;
}
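
/*
 * Note (explanatory, not from the original source): the lookup's binary
 * search stops once the span drops to four entries and then scans linearly,
 * so that when several entries share the same hashval it always descends
 * through the first of them. The trailing loop handles matching hashvals
 * that continue into the next leaf: if the leaf ends exactly on the search
 * hashval, the path is shifted one leaf forward and the lookup retried.
 */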

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da3_node_order(
	struct xfs_inode *dp,
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da3_icnode_hdr node1hdr;
	struct xfs_da3_icnode_hdr node2hdr;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
	dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
	btree1 = dp->d_ops->node_tree_p(node1);
	btree2 = dp->d_ops->node_tree_p(node2);

	if (node1hdr.count > 0 && node2hdr.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
	      be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
		return 1;
	}
	return 0;
}

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da3_blk_link(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*old_blk,
	struct xfs_da_state_blk	*new_blk)
{
	struct xfs_da_blkinfo	*old_info;
	struct xfs_da_blkinfo	*new_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			before = 0;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da3_node_read(args->trans, dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return 0;
}
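
/*
 * Note (explanatory, not from the original source): sibling blocks at each
 * level form a doubly linked list through the forw/back fields of
 * xfs_da_blkinfo. Linking a new block therefore touches at most three
 * buffers: the new block, the block it is linked next to, and, if present,
 * the old neighbour on the far side whose forw or back pointer must be
 * redirected.
 */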

/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da3_blk_unlink(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*drop_blk,
	struct xfs_da_state_blk	*save_blk)
{
	struct xfs_da_blkinfo	*drop_info;
	struct xfs_da_blkinfo	*save_info;
	struct xfs_da_blkinfo	*tmp_info;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	int			error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da3_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return error;
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
					  sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}

/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da3_path_shift(
	struct xfs_da_state	*state,
	struct xfs_da_state_path *path,
	int			forward,
	int			release,
	int			*result)
{
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	struct xfs_da_intnode	*node;
	struct xfs_da_args	*args;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno = 0;
	int			level;
	int			error;
	struct xfs_inode	*dp = state->args->dp;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		dp->d_ops->node_hdr_from_disk(&nodehdr, node);
		btree = dp->d_ops->node_tree_p(node);

		if (forward && (blk->index < nodehdr.count - 1)) {
			blk->index++;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = -ENOENT;	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Read the next child block into a local buffer.
		 */
		error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
					  args->whichfork);
		if (error)
			return error;

		/*
		 * Release the old block (if it's dirty, the trans doesn't
		 * actually let go) and swap the local buffer into the path
		 * structure. This ensures failure of the above read doesn't set
		 * a NULL buffer in an active slot in the path.
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);
		blk->blkno = blkno;
		blk->bp = bp;

		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));


		/*
		 * Note: we flatten the magic number to a single type so we
		 * don't have to compare against crc/non-crc types elsewhere.
		 */
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			blk->magic = XFS_DA_NODE_MAGIC;
			node = (xfs_da_intnode_t *)info;
			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = nodehdr.count - 1;
			blkno = be32_to_cpu(btree[blk->index].before);
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			blk->magic = XFS_ATTR_LEAF_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			blk->magic = XFS_DIR2_LEAFN_MAGIC;
			ASSERT(level == path->active-1);
			blk->index = 0;
			blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
							       blk->bp, NULL);
			break;
		default:
			ASSERT(0);
			break;
		}
	}
	*result = 0;
	return 0;
}
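
/*
 * Note (explanatory, not from the original source): the shift is a two-phase
 * walk. Phase one rolls up from the parent of the leaf until it finds a node
 * whose index is not already at the edge in the requested direction; phase
 * two rolls back down that sibling edge, re-reading one block per level, so
 * the path ends up at the same depth but one leaf to the left or right.
 */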

/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
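
/*
 * Worked example (explanatory, not from the original source): hashing the
 * two-byte name "ab" takes the short path with namelen == 2 and hash == 0,
 * giving ('a' << 7) ^ ('b' << 0) ^ rol32(0, 14) = 0x3080 ^ 0x62 = 0x30e2.
 * Longer names fold each group of four bytes into the running hash, which
 * is rotated by 28 bits per group.
 */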

enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};

int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_rfsblock_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = -ENOSPC;
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
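
/*
 * Note (explanatory, not from the original source): the allocator first asks
 * for the whole range as a single contiguous mapping (XFS_BMAPI_CONTIG) and
 * only if that fails, and more than one block was requested, falls back to
 * mapping the range piecemeal. Either way the result must cover exactly
 * [*bno, *bno + count) or the caller sees -ENOSPC.
 */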

/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			error;

	trace_xfs_da_grow_inode(args);

	bno = args->geo->leafblk;
	error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
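
/*
 * Note on the routine below (explanatory, not from the original source):
 * freeing an arbitrary block in the middle of the da-btree space can require
 * splitting the inode's bmap btree, which may fail with ENOSPC. Truncating
 * the very last block never does, so the victim block's contents are swapped
 * with the last block, every sibling and parent pointer is redirected to the
 * victim's old block number, and the (now duplicate) last block is the one
 * that gets unmapped.
 */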
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da3_swap_lastblock(
	struct xfs_da_args	*args,
	xfs_dablk_t		*dead_blknop,
	struct xfs_buf		**dead_bufp)
{
	struct xfs_da_blkinfo	*dead_info;
	struct xfs_da_blkinfo	*sib_info;
	struct xfs_da_intnode	*par_node;
	struct xfs_da_intnode	*dead_node;
	struct xfs_dir2_leaf	*dead_leaf2;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr par_hdr;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_buf		*dead_buf;
	struct xfs_buf		*last_buf;
	struct xfs_buf		*sib_buf;
	struct xfs_buf		*par_buf;
	xfs_dahash_t		dead_hash;
	xfs_fileoff_t		lastoff;
	xfs_dablk_t		dead_blkno;
	xfs_dablk_t		last_blkno;
	xfs_dablk_t		sib_blkno;
	xfs_dablk_t		par_blkno;
	int			error;
	int			w;
	int			entno;
	int			level;
	int			dead_level;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	dp = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = dp->i_mount;
	lastoff = args->geo->freeblk;
	error = xfs_bmap_last_before(tp, dp, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
	error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
	xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
		ents = dp->d_ops->leaf_ents_p(dead_leaf2);
		dead_level = 0;
		dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
	} else {
		struct xfs_da3_icnode_hdr deadhdr;

		dead_node = (xfs_da_intnode_t *)dead_info;
		dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
		btree = dp->d_ops->node_tree_p(dead_node);
		dead_level = deadhdr.level;
		dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t	*dp;
	int		done, error, w, count;
	xfs_trans_t	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, args->firstblock,
				    args->flist, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
static int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);