/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * Routines to implement directories as Btrees of hashed names.
 */
/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/
/*
 * Routines used for growing the Btree.
 */
STATIC int xfs_da_root_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_root,
					    xfs_da_state_blk_t *new_child);
STATIC int xfs_da_node_split(xfs_da_state_t *state,
					    xfs_da_state_blk_t *existing_blk,
					    xfs_da_state_blk_t *split_blk,
					    xfs_da_state_blk_t *blk_to_add,
					    int treelevel,
					    int *result);
STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *node_blk_1,
					 xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da_node_add(xfs_da_state_t *state,
				   xfs_da_state_blk_t *old_node_blk,
				   xfs_da_state_blk_t *new_node_blk);
/*
 * Routines used for shrinking the Btree.
 */
STATIC int xfs_da_root_join(xfs_da_state_t *state,
				   xfs_da_state_blk_t *root_blk);
STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da_node_remove(xfs_da_state_t *state,
				      xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
					 xfs_da_state_blk_t *src_node_blk,
					 xfs_da_state_blk_t *dst_node_blk);
STATIC uint	xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int	xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/
/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 xfs_dabuf_t **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int error;
	xfs_trans_t *tp;

	trace_xfs_da_node_create(args);

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);

	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	*bpp = bp;
	return(0);
}
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	xfs_dabuf_t *bp;
	int max, action, error, i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC))
				return(error);	/* GROT: attr is inconsistent */
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			state->extraafter = 0;	/* before newblk */
			trace_xfs_attr_leaf_split_before(state->args);
			error = xfs_attr_leaf_split(state, oldblk,
						    &state->extrablk);
			state->extraafter = 1;	/* after newblk */
			trace_xfs_attr_leaf_split_after(state->args);
			error = xfs_attr_leaf_split(state, newblk,
						    &state->extrablk);
			if (error)
				return(error);	/* GROT: attr inconsistent */
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			xfs_da_buf_done(addblk->bp);
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			break;
		}

		/*
		 * Record the newly split block for the next time thru?
		 */

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
		/*
		 * If we won't need this block again, it's getting dropped
		 * from the active path by the loop control, so we need
		 * to mark it done now.
		 */
		if (i > 0 || !addblk)
			xfs_da_buf_done(oldblk->bp);
	}

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error) {
		xfs_da_buf_done(oldblk->bp);
		xfs_da_buf_done(addblk->bp);
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */
	node = oldblk->bp->data;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->data;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->data;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_da_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	xfs_da_buf_done(oldblk->bp);
	xfs_da_buf_done(addblk->bp);
	addblk->bp = NULL;
	return(0);
}
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
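/*
 * Keeping the root at its fixed block number means the split works by
 * relocating the old root's contents into a freshly grown block and then
 * rebuilding block zero as a two-entry node pointing at both halves.
 */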
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	oldroot = blk1->bp->data;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_da_log_buf(tp, bp, 0, size - 1);
	xfs_da_buf_done(blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->data;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_da_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));
	xfs_da_buf_done(bp);

	return(0);
}
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->data;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
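/*
 * The move count below is (count1 - count2) / 2: a positive value moves
 * entries from the high end of node1 into the low end of node2, a negative
 * value moves them the other way, so the two nodes end up roughly even.
 */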
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_da_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_da_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->data;
	node2 = blk2->bp->data;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}
/*
 * Add a new entry to an intermediate node.
 */
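/*
 * Entries in a node are kept sorted by hashval; the new child is slid in
 * at oldblk->index after any higher entries have been shifted up one slot.
 */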
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_da_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/
/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
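/*
 * The walk mirrors xfs_da_split(): starting at the leaf we climb toward the
 * root, and at each level we either coalesce the block with a sibling,
 * unlink an empty block, or stop as soon as nothing more can be freed.
 */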
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action = 0, error = 0;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	xfs_dabuf_t *bp;
	int error;

	trace_xfs_da_root_join(state->args);

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->data;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	ASSERT(bp != NULL);
	xfs_da_blkinfo_onlychild_validate(bp->data,
					be16_to_cpu(oldroot->hdr.level));

	memcpy(root_blk->bp->data, bp->data, state->blocksize);
	xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
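/*
 * The sibling check below starts from node_ents, subtracts a 25% reserve
 * (node_ents >> 2) and the entry counts of both blocks; a non-negative
 * result means the two blocks fit into one with that much room to spare.
 */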
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	xfs_dabuf_t *bp;

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->data;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_read_buf(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);
		ASSERT(bp != NULL);

		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_da_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return(error);
	if (retval) {
		*action = 0;
		return(0);
	}
	*action = 1;
	return(0);
}
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_da_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->data;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_da_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_da_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}
/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->data;
	save_node = save_blk->bp->data;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_da_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_da_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/
/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
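/*
 * Within each node the entries are searched with a binary probe that stops
 * once the span drops to a few entries, then a short linear scan backs up
 * to the first entry carrying the wanted hashval.
 */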
int
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			blk->blkno = 0;
			state->path.active--;
			return(error);
		}
		curr = blk->bp->data;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->data;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/
/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
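/*
 * The per-type *_order() helpers compare the two blocks' hashvals to decide
 * whether the new block precedes or follows the existing one in hash order;
 * the sibling pointers on both neighbours are then fixed up accordingly.
 */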
STATIC int
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	xfs_dabuf_t *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->data;
	new_info = new_blk->bp->data;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
			xfs_da_buf_done(bp);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->data;
	node2 = node2_bp->data;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
{
	xfs_da_intnode_t *node;

	node = bp->data;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return 0;
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	xfs_dabuf_t *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->data;
	drop_info = drop_blk->bp->data;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_read_buf(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			ASSERT(bp != NULL);
			tmp_info = bp->data;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_da_log_buf(args->trans, bp, 0,
						    sizeof(*tmp_info) - 1);
			xfs_da_buf_done(bp);
		}
	}

	xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
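/*
 * The shift happens in two phases: climb toward the root until a node is
 * found whose index can move one entry in the requested direction, then
 * walk back down along that sibling edge reloading each level's block.
 */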
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->data;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_da_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
						     &blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->data;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/
/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
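/*
 * For example, a four-character name hashes to
 *	(name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^ name[3]
 * since the starting hash is zero; longer names keep folding four bytes
 * at a time into rol32(hash, 28).
 */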
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
STATIC xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
static int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	trace_xfs_da_grow_inode(args);

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
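/*
 * The swap copies the last block's contents over the dead block, repairs
 * the forw/back sibling pointers that referenced the last block, walks the
 * tree to the parent entry that pointed at it and redirects that entry,
 * then hands the now-unused last block back to the caller for removal.
 */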
STATIC int
xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
		      xfs_dabuf_t **dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
	xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->data;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
			goto done;
		sib_info = sib_buf->data;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_da_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		xfs_da_buf_done(sib_buf);
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_da_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
			goto done;
		par_node = par_buf->data;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_da_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	xfs_da_buf_done(par_buf);
	xfs_da_buf_done(dead_buf);
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_da_brelse(tp, par_buf);
	if (sib_buf)
		xfs_da_brelse(tp, sib_buf);
	xfs_da_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
		    xfs_dabuf_t *dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_da_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	xfs_fileoff_t	off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
/*
 * Used for get_buf, read_buf, read_bufr, and reada_buf.
 */
STATIC int
xfs_da_do_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	*mappedbnop,
	xfs_dabuf_t	**bpp,
	int		whichfork,
	int		caller)
{
	xfs_buf_t	*bp = NULL;
	xfs_buf_t	**bplist = NULL;
	int		error = 0;
	int		i;
	xfs_bmbt_irec_t	map;
	xfs_bmbt_irec_t	*mapp;
	xfs_daddr_t	mappedbno;
	xfs_mount_t	*mp;
	int		nbplist = 0;
	int		nfsb;
	int		nmap;
	int		nmapped;
	xfs_dabuf_t	*rbp;

	mp = dp->i_mount;
	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
	mappedbno = *mappedbnop;
	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb == 1)
			mapp = &map;
		else
			mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);

		nmap = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
				       &nmap, xfs_bmapi_aflag(whichfork));
		if (error)
			goto exit0;
	} else {
		map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		map.br_startoff = (xfs_fileoff_t)bno;
		map.br_blockcount = nfsb;
		mapp = &map;
		nmap = 1;
	}
	if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
		error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < nmap; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)mapp[i].br_startoff,
						(long long)mapp[i].br_startblock,
						(long long)mapp[i].br_blockcount,
						mapp[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto exit0;
	}
	if (caller != 3 && nmap > 1) {
		bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
		nbplist = 0;
	}
	/*
	 * Turn the mapping(s) into buffer(s).
	 */
	for (i = 0; i < nmap; i++) {
		mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
		if (i == 0)
			*mappedbnop = mappedbno;
		nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
		if (caller == 0) {
			bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
						mappedbno, nmapped, 0);
			error = bp ? bp->b_error : XFS_ERROR(EIO);
		} else if (caller == 1 || caller == 2) {
			error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
						mappedbno, nmapped, 0, &bp);
		} else {
			xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
			error = 0;
			bp = NULL;
		}
		if (error) {
			if (bp)
				xfs_trans_brelse(trans, bp);
			goto exit1;
		}
		if (!bp)
			continue;
		if (whichfork == XFS_ATTR_FORK)
			xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
		else
			xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
		if (bplist)
			bplist[nbplist++] = bp;
	}
	/*
	 * Build a dabuf structure.
	 */
	if (bplist)
		rbp = xfs_da_buf_make(nbplist, bplist);
	else if (bp)
		rbp = xfs_da_buf_make(1, &bp);
	else
		rbp = NULL;
	/*
	 * For read_buf, check the magic number.
	 */
	if (caller == 1) {
		xfs_dir2_data_hdr_t	*hdr = rbp->data;
		xfs_dir2_free_t		*free = rbp->data;
		xfs_da_blkinfo_t	*info = rbp->data;
		uint			magic, magic1;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_da_brelse(trans, rbp);
			nbplist = 0;
			goto exit1;
		}
	}
	if (bplist)
		kmem_free(bplist);
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = rbp;
	return 0;
exit1:
	if (bplist) {
		for (i = 0; i < nbplist; i++)
			xfs_trans_brelse(trans, bplist[i]);
		kmem_free(bplist);
	}
exit0:
	if (mapp != &map)
		kmem_free(mapp);
	if (bpp)
		*bpp = NULL;
	return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	xfs_daddr_t	mappedbno,
	xfs_dabuf_t	**bpp,
	int		whichfork)
{
	return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}

/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	xfs_trans_t	*trans,
	xfs_inode_t	*dp,
	xfs_dablk_t	bno,
	int		whichfork)
{
	xfs_daddr_t	rval;

	rval = -1;
	if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
		return -1;
	else
		return rval;
}
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */
kmem_zone_t *xfs_dabuf_zone;		/* dabuf zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++) {
		if (state->altpath.blk[i].bp) {
			if (state->altpath.blk[i].bp != state->path.blk[i].bp)
				xfs_da_buf_done(state->altpath.blk[i].bp);
			state->altpath.blk[i].bp = NULL;
		}
	}
	state->altpath.active = 0;
}
/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	int	i;

	xfs_da_state_kill_altpath(state);
	for (i = 0; i < state->path.active; i++) {
		if (state->path.blk[i].bp)
			xfs_da_buf_done(state->path.blk[i].bp);
	}
	if (state->extravalid && state->extrablk.bp)
		xfs_da_buf_done(state->extrablk.bp);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
STATIC xfs_dabuf_t *
xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
	xfs_buf_t	*bp;
	xfs_dabuf_t	*dabuf;
	int		i;
	int		off;

	if (nbuf == 1)
		dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
	else
		dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
	dabuf->dirty = 0;
	if (nbuf == 1) {
		dabuf->nbuf = 1;
		bp = bps[0];
		dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
		dabuf->data = bp->b_addr;
		dabuf->bps[0] = bp;
	} else {
		dabuf->nbuf = nbuf;
		for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
			dabuf->bps[i] = bp = bps[i];
			dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
		}
		dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
		for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
			bp = bps[i];
			memcpy((char *)dabuf->data + off, bp->b_addr,
				XFS_BUF_COUNT(bp));
		}
	}
	return dabuf;
}
STATIC void
xfs_da_buf_clean(xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	int		i;
	int		off;

	if (dabuf->dirty) {
		ASSERT(dabuf->nbuf > 1);
		dabuf->dirty = 0;
		for (i = off = 0; i < dabuf->nbuf;
				i++, off += XFS_BUF_COUNT(bp)) {
			bp = dabuf->bps[i];
			memcpy(bp->b_addr, dabuf->data + off,
						XFS_BUF_COUNT(bp));
		}
	}
}
void
xfs_da_buf_done(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->dirty)
		xfs_da_buf_clean(dabuf);
	if (dabuf->nbuf > 1) {
		kmem_free(dabuf->data);
		kmem_free(dabuf);
	} else {
		kmem_zone_free(xfs_dabuf_zone, dabuf);
	}
}
/*
 * Log transaction from a dabuf.
 */
void
xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
{
	xfs_buf_t	*bp;
	uint		f;
	int		i;
	uint		l;
	int		off;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if (dabuf->nbuf == 1) {
		ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
		xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
		return;
	}
	dabuf->dirty = 1;
	ASSERT(first <= last);
	for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
		bp = dabuf->bps[i];
		f = off;
		l = f + XFS_BUF_COUNT(bp) - 1;
		if (f < first)
			f = first;
		if (l > last)
			l = last;
		if (f <= l)
			xfs_trans_log_buf(tp, bp, f - off, l - off);
		/*
		 * B_DONE is set by xfs_trans_log buf.
		 * If we don't set it on a new buffer (get not read)
		 * then if we don't put anything in the buffer it won't
		 * be set, and at commit it is released into the cache,
		 * and then a read will fail.
		 */
		else if (!(XFS_BUF_ISDONE(bp)))
			XFS_BUF_DONE(bp);
	}
	ASSERT(last < off);
}
/*
 * Release dabuf from a transaction.
 * Have to free up the dabuf before the buffers are released,
 * since the synchronization on the dabuf is really the lock on the buffer.
 */
void
xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_brelse(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}
/*
 * Invalidate dabuf from a transaction.
 */
void
xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
{
	xfs_buf_t	*bp;
	xfs_buf_t	**bplist;
	int		i;
	int		nbuf;

	ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
	if ((nbuf = dabuf->nbuf) == 1) {
		bplist = &bp;
		bp = dabuf->bps[0];
	} else {
		bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
		memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
	}
	xfs_da_buf_done(dabuf);
	for (i = 0; i < nbuf; i++)
		xfs_trans_binval(tp, bplist[i]);
	if (bplist != &bp)
		kmem_free(bplist);
}
/*
 * Get the first daddr from a dabuf.
 */
xfs_daddr_t
xfs_da_blkno(xfs_dabuf_t *dabuf)
{
	ASSERT(dabuf->nbuf);
	ASSERT(dabuf->data);
	return XFS_BUF_ADDR(dabuf->bps[0]);
}