/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * Routines to implement directories as Btrees of hashed names.
 */
/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *node_blk_1,
					 xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
				   xfs_da_state_blk_t *old_node_blk,
				   xfs_da_state_blk_t *new_node_blk);
/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
					   xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
					      xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *src_node_blk,
					 xfs_da_state_blk_t *dst_node_blk);
/*
 * Utility routines.
 */
STATIC uint	xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
STATIC int	xfs_da_node_order(struct xfs_buf *node1_bp,
				  struct xfs_buf *node2_bp);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
static void
xfs_da_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_node_hdr	*hdr = bp->b_addr;
	int			block_ok = 0;

	block_ok = hdr->info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC);
	block_ok = block_ok &&
			be16_to_cpu(hdr->level) > 0 &&
			be16_to_cpu(hdr->count) > 0;
	if (!block_ok) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}

static void
xfs_da_node_write_verify(
	struct xfs_buf	*bp)
{
	xfs_da_node_verify(bp);
}
/*
 * leaf/node format detection on trees is sketchy, so a node read can be done on
 * leaf level blocks when detection identifies the tree as a node format tree
 * incorrectly. In this case, we need to swap the verifier to match the correct
 * format of the block being read.
 */
static void
xfs_da_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA_NODE_MAGIC:
		xfs_da_node_verify(bp);
		break;
	case XFS_ATTR_LEAF_MAGIC:
		bp->b_ops = &xfs_attr_leaf_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
		bp->b_ops = &xfs_dir2_leafn_buf_ops;
		bp->b_ops->verify_read(bp);
		return;
	default:
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
				     mp, info);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		break;
	}
}
const struct xfs_buf_ops xfs_da_node_buf_ops = {
	.verify_read = xfs_da_node_read_verify,
	.verify_write = xfs_da_node_write_verify,
};
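/*
 * Summary note (not from the original source): any buffer read through
 * xfs_da_node_read() below is handed these ops, so a block that turns
 * out to be an attr or dir2 leaf gets its b_ops swapped inside
 * xfs_da_node_read_verify() and is re-verified by the leaf code.
 */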
int
xfs_da_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, &xfs_da_node_buf_ops);
}
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/
/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 struct xfs_buf **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	struct xfs_buf *bp;
	int error;
	xfs_trans_t *tp;

	trace_xfs_da_node_create(args);

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->b_addr;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	bp->b_ops = &xfs_da_node_buf_ops;
	*bpp = bp;
	return(0);
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	struct xfs_buf *bp;
	int max, action, error, i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return(0);
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	struct xfs_buf *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->b_addr;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));

	return(0);
}
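/*
 * Design note (paraphrasing the logic above, not from the original
 * source): lookups always start at a fixed offset (mp->m_dirleafblk for
 * directories, 0 for attributes), so the root must stay at that block.
 * That is why the old root's contents are copied out to a freshly grown
 * block and block 0 is rewritten as the new, taller root, rather than
 * allocating the new root somewhere else.
 */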
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
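/*
 * Illustration of the split test above (the entry count is made up):
 * with state->node_ents == 64, a node already holding 64 entries and
 * newcount == 1 gives 64 + 1 > 64, so a sibling is allocated and the
 * entries are rebalanced; at 63 entries the new entry is simply added
 * in place and *result stays 0.
 */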
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}
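/*
 * Worked example of the count calculation above: if node1 holds 100
 * entries and node2 is empty, count = (100 - 0) / 2 = 50, so the upper
 * 50 entries of node1 move into node2 (the case called out in the
 * header NOTE).  If node2 already held 40, count = 30 and only the top
 * 30 entries move.
 */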
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/
/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	trace_xfs_da_join(state->args);

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	struct xfs_buf *bp;
	int error;

	trace_xfs_da_root_join(state->args);

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->b_addr;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_node_read(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	xfs_da_blkinfo_onlychild_validate(bp->b_addr,
					be16_to_cpu(oldroot->hdr.level));

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	struct xfs_buf *bp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_node_read(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->b_addr;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_trans_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
			return(0);
		}
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
			return(0);
		}
	}
	*action = 1;
	return(0);
}
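/*
 * The "25% to spare" test above in numbers: count starts at
 * node_ents - node_ents/4 (75% of capacity) and has both blocks' entry
 * counts subtracted, so count >= 0 holds exactly when the merged block
 * would be at most 75% full, leaving the desired 25% free.
 */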
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}
/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/
/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->b_addr;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
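/*
 * Note on the lookup above: the binary search stops once the span drops
 * to four entries and the two linear scans finish the job.  They also
 * handle duplicate hashvals by backing up to the first entry whose
 * hashval is >= the search value; e.g. given entry hashvals
 * {10, 20, 20, 20, 30}, a search for 20 always descends at the first 20.
 */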
/*========================================================================
 * Utility routines.
 *========================================================================*/
/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	struct xfs_buf *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(
	struct xfs_buf	*bp,
	int		*count)
{
	xfs_da_intnode_t *node;

	node = bp->b_addr;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return(0);
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	struct xfs_buf *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->b_addr;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_node_read(args->trans, args->dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}
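/*
 * Example of a forward shift: when a leaf's last hashval equals the
 * search hashval, xfs_da_node_lookup_int() calls
 * xfs_da_path_shift(state, &state->path, 1, 1, &retval) so the search
 * continues in the next leaf, since entries sharing a hashval may spill
 * across a block boundary.
 */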
/*========================================================================
 * Utility routines.
 *========================================================================*/
/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
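/*
 * Worked examples (the values follow directly from the code above): the
 * one-byte name "a" skips the unrolled loop and hashes to 0x61; the
 * four-byte name "abcd" takes one loop pass and yields
 * (0x61 << 21) ^ (0x62 << 14) ^ (0x63 << 7) ^ 0x64 = 0x0c38b1e4.
 */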
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	trace_xfs_da_grow_inode(args);

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
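/*
 * The asymmetry above mirrors the on-disk layout: directory blocks are
 * m_dirblkfsbs filesystem blocks carved from the region starting at
 * m_dirleafblk, while attribute-fork btree blocks are single filesystem
 * blocks allocated from offset 0.
 */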
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da_swap_lastblock(
	xfs_da_args_t	*args,
	xfs_dablk_t	*dead_blknop,
	struct xfs_buf	**dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	error = xfs_da_node_read(tp, ip, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		error = xfs_da_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
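/*
 * For example, with bno = 100 and count = 4, the mappings
 * {startoff 100, 2 blocks} + {startoff 102, 2 blocks} pass this check;
 * a hole, a delayed extent, or a second mapping starting at 103 instead
 * of 102 fails it.
 */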
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	unsigned int		*nmaps,
	struct xfs_bmbt_irec	*irecs,
	unsigned int		nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
		if (!map)
			return ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : XFS_ERROR(EIO);
	if (error) {
		xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);

	/*
	 * This verification code will be moved to a CRC verification callback
	 * function so just leave it here unchanged until then.
	 */
	{
		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
		xfs_dir2_free_t		*free = bp->b_addr;
		xfs_da_blkinfo_t	*info = bp->b_addr;
		uint			magic, magic1;
		struct xfs_mount	*mp = dp->i_mount;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_trans_brelse(trans, bp);
			goto out_free;
		}
	}
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	if (error)
		return -1;
	return mappedbno;
}
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}