/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
/*
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da_root_split(xfs_da_state_t *state,
				  xfs_da_state_blk_t *existing_root,
				  xfs_da_state_blk_t *new_child);
STATIC int	xfs_da_node_split(xfs_da_state_t *state,
				  xfs_da_state_blk_t *existing_blk,
				  xfs_da_state_blk_t *split_blk,
				  xfs_da_state_blk_t *blk_to_add,
				  int treelevel, int *result);
STATIC void	xfs_da_node_rebalance(xfs_da_state_t *state,
				      xfs_da_state_blk_t *node_blk_1,
				      xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da_node_add(xfs_da_state_t *state,
				xfs_da_state_blk_t *old_node_blk,
				xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da_root_join(xfs_da_state_t *state,
				 xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da_node_remove(xfs_da_state_t *state,
				   xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da_node_unbalance(xfs_da_state_t *state,
				      xfs_da_state_blk_t *src_node_blk,
				      xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC uint	xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
STATIC int	xfs_da_node_order(struct xfs_buf *node1_bp,
				  struct xfs_buf *node2_bp);
STATIC int	xfs_da_blk_unlink(xfs_da_state_t *state,
				  xfs_da_state_blk_t *drop_blk,
				  xfs_da_state_blk_t *save_blk);
STATIC void	xfs_da_state_kill_altpath(xfs_da_state_t *state);
static void
xfs_da_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_node_hdr	*hdr = bp->b_addr;
	int			block_ok = 0;

	block_ok = hdr->info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC);
	block_ok = block_ok &&
			be16_to_cpu(hdr->level) > 0 &&
			be16_to_cpu(hdr->count) > 0;
	if (!block_ok) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}

static void
xfs_da_node_write_verify(
	struct xfs_buf		*bp)
{
	xfs_da_node_verify(bp);
}

static void
xfs_da_node_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_blkinfo	*info = bp->b_addr;

	switch (be16_to_cpu(info->magic)) {
	case XFS_DA_NODE_MAGIC:
		xfs_da_node_verify(bp);
		break;
	case XFS_ATTR_LEAF_MAGIC:
		xfs_attr_leaf_read_verify(bp);
		return;
	case XFS_DIR2_LEAFN_MAGIC:
		xfs_dir2_leafn_read_verify(bp);
		return;
	default:
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, info);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		break;
	}

	bp->b_pre_io = xfs_da_node_write_verify;
	xfs_buf_ioend(bp, 0);
}

int
xfs_da_node_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			which_fork)
{
	return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
					which_fork, xfs_da_node_read_verify);
}
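/*
 * Note on the mappedbno argument used throughout this file: callers that
 * do not already know the disk address of a dablk pass -1 (map it here;
 * a hole is an error) or -2 (map it, but a hole just means "no block");
 * see xfs_dabuf_map() below.
 */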
/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
				 struct xfs_buf **bpp, int whichfork)
{
	xfs_da_intnode_t *node;
	struct xfs_buf *bp;
	int error;
	xfs_trans_t *tp;

	trace_xfs_da_node_create(args);

	tp = args->trans;
	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	node = bp->b_addr;
	node->hdr.info.forw = 0;
	node->hdr.info.back = 0;
	node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
	node->hdr.info.pad = 0;
	node->hdr.count = 0;
	node->hdr.level = cpu_to_be16(level);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	bp->b_pre_io = xfs_da_node_write_verify;
	*bpp = bp;
	return(0);
}
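/*
 * Note that only the node header is logged here; callers that populate
 * the new node (e.g. xfs_da_root_split()) log the btree entries they add
 * themselves.
 */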
/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da_split(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *oldblk, *newblk, *addblk;
	xfs_da_intnode_t *node;
	struct xfs_buf *bp;
	int max, action, error, i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr_leaf_split(state, oldblk,
							    &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr_leaf_split(state, newblk,
							    &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da_root_split(state, oldblk, addblk);
	if (error)
		return(error);	/* GROT: dir is inconsistent */

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
		    XFS_DA_LOGRANGE(node, &node->hdr.info,
		    sizeof(node->hdr.info)));
	}
	return(0);
}
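/*
 * The loop above works bottom-up: a leaf split hands a new block up to
 * its parent node, which may itself split, and so on until some node
 * absorbs the insert or the root itself has to be split below.
 */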
/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in process.
 */
STATIC int						/* error */
xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				 xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node, *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t blkno;
	struct xfs_buf *bp;
	int error, size;
	xfs_inode_t *dp;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	xfs_dir2_leaf_t *leaf;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	ASSERT(args != NULL);
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return(error);
	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
		size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
			     (char *)oldroot);
	} else {
		ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
		leaf = (xfs_dir2_leaf_t *)oldroot;
		size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
			     (char *)leaf);
	}
	memcpy(node, oldroot, size);
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_pre_io = blk1->bp->b_pre_io;
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
	if (error)
		return(error);
	node = bp->b_addr;
	node->btree[0].hashval = cpu_to_be32(blk1->hashval);
	node->btree[0].before = cpu_to_be32(blk1->blkno);
	node->btree[1].hashval = cpu_to_be32(blk2->hashval);
	node->btree[1].before = cpu_to_be32(blk2->blkno);
	node->hdr.count = cpu_to_be16(2);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, node->btree,
			sizeof(xfs_da_node_entry_t) * 2));

	return(0);
}
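/*
 * Keeping the root at block 0 (m_dirleafblk for directories) means the
 * tree is always entered at the same dablk; growing the tree moves the
 * old root's contents out to a newly allocated block rather than moving
 * the root itself.
 */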
/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
				 xfs_da_state_blk_t *newblk,
				 xfs_da_state_blk_t *addblk,
				 int treelevel, int *result)
{
	xfs_da_intnode_t *node;
	xfs_dablk_t blkno;
	int newcount, error;
	int useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da_node_create(state->args, blkno, treelevel,
					   &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da_node_rebalance(state, oldblk, newblk);
		error = xfs_da_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
		oldblk->index++;
		xfs_da_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}
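/*
 * The "extra" block handled above comes from a double split of an attr
 * leaf (state->extrablk); it must be inserted into the parent alongside
 * the regular new block, either before or after it per state->extraafter.
 */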
/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
				     xfs_da_state_blk_t *blk2)
{
	xfs_da_intnode_t *node1, *node2, *tmpnode;
	xfs_da_node_entry_t *btree_s, *btree_d;
	int count, tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
	}
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &node2->btree[0];
			btree_d = &node2->btree[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		be16_add_cpu(&node2->hdr.count, count);
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
		btree_d = &node2->btree[0];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, -count);
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[0];
		btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
		memcpy(btree_d, btree_s, tmp);
		be16_add_cpu(&node1->hdr.count, count);
		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp  = be16_to_cpu(node2->hdr.count) - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &node2->btree[count];
		btree_d = &node2->btree[0];
		memmove(btree_d, btree_s, tmp);
		be16_add_cpu(&node2->hdr.count, -count);
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
			sizeof(node2->hdr) +
			sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
	blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
		blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
		blk1->index = be16_to_cpu(node1->hdr.count) + 1;	/* make it invalid */
	}
}
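/*
 * Worked example: if node1 holds 100 entries and node2 is empty,
 * count = (100 - 0) / 2 = 50, so the upper 50 entries of node1 move to
 * node2 and both blocks end up half full.
 */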
/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
			       xfs_da_state_blk_t *newblk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	btree = &node->btree[ oldblk->index ];
	if (oldblk->index < be16_to_cpu(node->hdr.count)) {
		tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
		memmove(btree + 1, btree, tmp);
	}
	btree->hashval = cpu_to_be32(newblk->hashval);
	btree->before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, 1);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
}
/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da_join(xfs_da_state_t *state)
{
	xfs_da_state_blk_t *drop_blk, *save_blk;
	int action, error;

	trace_xfs_da_join(state->args);

	action = 0;
	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for (  ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da_node_remove(state, drop_blk);
			xfs_da_fixhashpath(state, &state->path);
			error = xfs_da_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da_fixhashpath(state, &state->altpath);
		error = xfs_da_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da_node_remove(state, drop_blk);
	xfs_da_fixhashpath(state, &state->path);
	error = xfs_da_root_join(state, &state->path.blk[0]);
	return(error);
}
#ifdef	DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
	} else
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */
/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
{
	xfs_da_intnode_t *oldroot;
	xfs_da_args_t *args;
	xfs_dablk_t child;
	struct xfs_buf *bp;
	int error;

	trace_xfs_da_root_join(state->args);

	args = state->args;
	ASSERT(args != NULL);
	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
	oldroot = root_blk->bp->b_addr;
	ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(!oldroot->hdr.info.forw);
	ASSERT(!oldroot->hdr.info.back);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (be16_to_cpu(oldroot->hdr.count) > 1)
		return(0);

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	child = be32_to_cpu(oldroot->btree[0].before);
	ASSERT(child != 0);
	error = xfs_da_node_read(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return(error);
	xfs_da_blkinfo_onlychild_validate(bp->b_addr,
					be16_to_cpu(oldroot->hdr.level));

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the pre_io pointer as well to match the buffer type change.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
	root_blk->bp->b_pre_io = bp->b_pre_io;
	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}
/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
{
	xfs_da_intnode_t *node;
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	int count, forward, error, retval, i;
	xfs_dablk_t blkno;
	struct xfs_buf *bp;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	node = (xfs_da_intnode_t *)info;
	count = be16_to_cpu(node->hdr.count);
	if (count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		*action = retval ? 0 : 2;
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	/* start with smaller blk num */
	forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
	for (i = 0; i < 2; forward = !forward, i++) {
		if (forward)
			blkno = be32_to_cpu(info->forw);
		else
			blkno = be32_to_cpu(info->back);
		if (blkno == 0)
			continue;
		error = xfs_da_node_read(state->args->trans, state->args->dp,
					blkno, -1, &bp, state->args->whichfork);
		if (error)
			return(error);

		node = (xfs_da_intnode_t *)info;
		count  = state->node_ents;
		count -= state->node_ents >> 2;
		count -= be16_to_cpu(node->hdr.count);
		node = bp->b_addr;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		count -= be16_to_cpu(node->hdr.count);
		xfs_trans_brelse(state->args->trans, bp);
		if (count >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return(0);
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return(error);
	*action = retval ? 0 : 1;
	return(0);
}
/*
 * Walk back up the tree adjusting hash values as necessary,
 * when we stop making changes, return.
 */
void
xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
{
	xfs_da_state_blk_t *blk;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dahash_t lasthash=0;
	int level, count;

	trace_xfs_da_fixhashpath(state->args);

	level = path->active-1;
	blk = &path->blk[ level ];
	switch (blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	case XFS_DA_NODE_MAGIC:
		lasthash = xfs_da_node_lasthash(blk->bp, &count);
		if (count == 0)
			return;
		break;
	}
	for (blk--, level--; level >= 0; blk--, level--) {
		node = blk->bp->b_addr;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		btree = &node->btree[ blk->index ];
		if (be32_to_cpu(btree->hashval) == lasthash)
			break;
		blk->hashval = lasthash;
		btree->hashval = cpu_to_be32(lasthash);
		xfs_trans_log_buf(state->args->trans, blk->bp,
				  XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));

		lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
	}
}
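/*
 * The early break above is what bounds the walk: once a parent entry
 * already carries the child's last hashval, no ancestor can change
 * either, so there is nothing further to propagate.
 */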
/*
 * Remove an entry from an intermediate node.
 */
STATIC void
xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
{
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	int tmp;

	trace_xfs_da_node_remove(state->args);

	node = drop_blk->bp->b_addr;
	ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
	ASSERT(drop_blk->index >= 0);

	/*
	 * Copy over the offending entry, or just zero it out.
	 */
	btree = &node->btree[drop_blk->index];
	if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
		tmp  = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, btree + 1, tmp);
		xfs_trans_log_buf(state->args->trans, drop_blk->bp,
		    XFS_DA_LOGRANGE(node, btree, tmp));
		btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
	}
	memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
	be16_add_cpu(&node->hdr.count, -1);
	xfs_trans_log_buf(state->args->trans, drop_blk->bp,
	    XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));

	/*
	 * Copy the last hash value from the block to propagate upwards.
	 */
	btree--;
	drop_blk->hashval = be32_to_cpu(btree->hashval);
}
/*
 * Unbalance the btree elements between two intermediate nodes,
 * move all Btree elements from one node into another.
 */
STATIC void
xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				     xfs_da_state_blk_t *save_blk)
{
	xfs_da_intnode_t *drop_node, *save_node;
	xfs_da_node_entry_t *btree;
	int tmp;
	xfs_trans_t *tp;

	trace_xfs_da_node_unbalance(state->args);

	drop_node = drop_blk->bp->b_addr;
	save_node = save_blk->bp->b_addr;
	ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	tp = state->args->trans;

	/*
	 * If the dying block has lower hashvals, then move all the
	 * elements in the remaining block up to make a hole.
	 */
	if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
	    (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
	     be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
	{
		btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
		tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
		memmove(btree, &save_node->btree[0], tmp);
		btree = &save_node->btree[0];
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				(be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
				sizeof(xfs_da_node_entry_t)));
	} else {
		btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
		xfs_trans_log_buf(tp, save_blk->bp,
			XFS_DA_LOGRANGE(save_node, btree,
				be16_to_cpu(drop_node->hdr.count) *
				sizeof(xfs_da_node_entry_t)));
	}

	/*
	 * Move all the B-tree elements from drop_blk to save_blk.
	 */
	tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
	memcpy(btree, &drop_node->btree[0], tmp);
	be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));

	xfs_trans_log_buf(tp, save_blk->bp,
		XFS_DA_LOGRANGE(save_node, &save_node->hdr,
			sizeof(save_node->hdr)));

	/*
	 * Save the last hashval in the remaining block for upward propagation.
	 */
	save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
}
/*========================================================================
 * Routines used for finding things in the Btree.
 *========================================================================*/

/*
 * Walk down the Btree looking for a particular filename, filling
 * in the state structure as we go.
 *
 * We will set the state structure to point to each of the elements
 * in each of the nodes where either the hashval is or should be.
 *
 * We support duplicate hashval's so for each entry in the current
 * node that could contain the desired hashval, descend.  This is a
 * pruned depth-first tree search.
 */
int							/* error */
xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *curr;
	xfs_da_intnode_t *node;
	xfs_da_node_entry_t *btree;
	xfs_dablk_t blkno;
	int probe, span, max, error, retval;
	xfs_dahash_t hashval, btreehashval;
	xfs_da_args_t *args;

	args = state->args;

	/*
	 * Descend thru the B-tree searching each level for the right
	 * node to use, until the right hashval is found.
	 */
	blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
	for (blk = &state->path.blk[0], state->path.active = 1;
			 state->path.active <= XFS_DA_NODE_MAXDEPTH;
			 blk++, state->path.active++) {
		/*
		 * Read the next node down in the tree.
		 */
		blk->blkno = blkno;
		error = xfs_da_node_read(args->trans, args->dp, blkno,
					-1, &blk->bp, args->whichfork);
		if (error) {
			state->path.active--;
			return(error);
		}
		curr = blk->bp->b_addr;
		blk->magic = be16_to_cpu(curr->magic);
		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
		       blk->magic == XFS_ATTR_LEAF_MAGIC);

		/*
		 * Search an intermediate node for a match.
		 */
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = blk->bp->b_addr;
			max = be16_to_cpu(node->hdr.count);
			blk->hashval = be32_to_cpu(node->btree[max-1].hashval);

			/*
			 * Binary search.  (note: small blocks will skip loop)
			 */
			probe = span = max / 2;
			hashval = args->hashval;
			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
				span /= 2;
				btreehashval = be32_to_cpu(btree->hashval);
				if (btreehashval < hashval)
					probe += span;
				else if (btreehashval > hashval)
					probe -= span;
				else
					break;
			}
			ASSERT((probe >= 0) && (probe < max));
			ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));

			/*
			 * Since we may have duplicate hashval's, find the first
			 * matching hashval in the node.
			 */
			while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
				btree--;
				probe--;
			}
			while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
				btree++;
				probe++;
			}

			/*
			 * Pick the right block to descend on.
			 */
			if (probe == max) {
				blk->index = max-1;
				blkno = be32_to_cpu(node->btree[max-1].before);
			} else {
				blk->index = probe;
				blkno = be32_to_cpu(btree->before);
			}
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
			break;
		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
			break;
		}
	}

	/*
	 * A leaf block that ends in the hashval that we are interested in
	 * (final hashval == search hashval) means that the next block may
	 * contain more entries with the same hashval, shift upward to the
	 * next leaf and keep searching.
	 */
	for (;;) {
		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
			blk->index = args->index;
			args->blkno = blk->blkno;
		} else {
			ASSERT(0);
			return XFS_ERROR(EFSCORRUPTED);
		}
		if (((retval == ENOENT) || (retval == ENOATTR)) &&
		    (blk->hashval == args->hashval)) {
			error = xfs_da_path_shift(state, &state->path, 1, 1,
							 &retval);
			if (error)
				return(error);
			if (retval == 0) {
				continue;
			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
				/* path_shift() gives ENOENT */
				retval = XFS_ERROR(ENOATTR);
			}
		}
		break;
	}
	*result = retval;
	return(0);
}
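/*
 * The binary search above is deliberately fuzzy: it stops once the span
 * drops to four entries and then scans linearly, so that with duplicate
 * hashvals we always land on the first candidate entry before descending.
 */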
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Link a new block into a doubly linked list of blocks (of whatever type).
 */
int							/* error */
xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
			       xfs_da_state_blk_t *new_blk)
{
	xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
	xfs_da_args_t *args;
	int before=0, error;
	struct xfs_buf *bp;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	old_info = old_blk->bp->b_addr;
	new_info = new_blk->bp->b_addr;
	ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
	       old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       old_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
	ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
	ASSERT(old_blk->magic == new_blk->magic);

	switch (old_blk->magic) {
	case XFS_ATTR_LEAF_MAGIC:
		before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DIR2_LEAFN_MAGIC:
		before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
		break;
	case XFS_DA_NODE_MAGIC:
		before = xfs_da_node_order(old_blk->bp, new_blk->bp);
		break;
	}

	/*
	 * Link blocks in appropriate order.
	 */
	if (before) {
		/*
		 * Link new block in before existing block.
		 */
		trace_xfs_da_link_before(args);
		new_info->forw = cpu_to_be32(old_blk->blkno);
		new_info->back = old_info->back;
		if (old_info->back) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(old_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			tmp_info = bp->b_addr;
			ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
			ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
			tmp_info->forw = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->back = cpu_to_be32(new_blk->blkno);
	} else {
		/*
		 * Link new block in after existing block.
		 */
		trace_xfs_da_link_after(args);
		new_info->forw = old_info->forw;
		new_info->back = cpu_to_be32(old_blk->blkno);
		if (old_info->forw) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(old_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == old_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
			tmp_info->back = cpu_to_be32(new_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
		}
		old_info->forw = cpu_to_be32(new_blk->blkno);
	}

	xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
	xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
	return(0);
}
/*
 * Compare two intermediate nodes for "order".
 */
STATIC int
xfs_da_node_order(
	struct xfs_buf	*node1_bp,
	struct xfs_buf	*node2_bp)
{
	xfs_da_intnode_t *node1, *node2;

	node1 = node1_bp->b_addr;
	node2 = node2_bp->b_addr;
	ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
	       node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
	    ((be32_to_cpu(node2->btree[0].hashval) <
	      be32_to_cpu(node1->btree[0].hashval)) ||
	     (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
	      be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
		return(1);
	}
	return(0);
}
/*
 * Pick up the last hashvalue from an intermediate node.
 */
STATIC uint
xfs_da_node_lasthash(
	struct xfs_buf	*bp,
	int		*count)
{
	xfs_da_intnode_t *node;

	node = bp->b_addr;
	ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
	if (count)
		*count = be16_to_cpu(node->hdr.count);
	if (!node->hdr.count)
		return 0;
	return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
}
/*
 * Unlink a block from a doubly linked list of blocks.
 */
STATIC int						/* error */
xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
				 xfs_da_state_blk_t *save_blk)
{
	xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
	xfs_da_args_t *args;
	struct xfs_buf *bp;
	int error;

	/*
	 * Set up environment.
	 */
	args = state->args;
	ASSERT(args != NULL);
	save_info = save_blk->bp->b_addr;
	drop_info = drop_blk->bp->b_addr;
	ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
	       save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
	       save_blk->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
	ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
	ASSERT(save_blk->magic == drop_blk->magic);
	ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
	       (be32_to_cpu(save_info->back) == drop_blk->blkno));
	ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
	       (be32_to_cpu(drop_info->back) == save_blk->blkno));

	/*
	 * Unlink the leaf block from the doubly linked chain of leaves.
	 */
	if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
		trace_xfs_da_unlink_back(args);
		save_info->back = drop_info->back;
		if (drop_info->back) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->back),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
			tmp_info->forw = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
				sizeof(*tmp_info) - 1);
		}
	} else {
		trace_xfs_da_unlink_forward(args);
		save_info->forw = drop_info->forw;
		if (drop_info->forw) {
			error = xfs_da_node_read(args->trans, args->dp,
						be32_to_cpu(drop_info->forw),
						-1, &bp, args->whichfork);
			if (error)
				return(error);
			tmp_info = bp->b_addr;
			ASSERT(tmp_info->magic == save_info->magic);
			ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
			tmp_info->back = cpu_to_be32(save_blk->blkno);
			xfs_trans_log_buf(args->trans, bp, 0,
				sizeof(*tmp_info) - 1);
		}
	}

	xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return(0);
}
/*
 * Move a path "forward" or "!forward" one block at the current level.
 *
 * This routine will adjust a "path" to point to the next block
 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
 * Btree, including updating pointers to the intermediate nodes between
 * the new bottom and the root.
 */
int							/* error */
xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
				 int forward, int release, int *result)
{
	xfs_da_state_blk_t *blk;
	xfs_da_blkinfo_t *info;
	xfs_da_intnode_t *node;
	xfs_da_args_t *args;
	xfs_dablk_t blkno=0;
	int level, error;

	trace_xfs_da_path_shift(state->args);

	/*
	 * Roll up the Btree looking for the first block where our
	 * current index is not at the edge of the block.  Note that
	 * we skip the bottom layer because we want the sibling block.
	 */
	args = state->args;
	ASSERT(args != NULL);
	ASSERT(path != NULL);
	ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
	level = (path->active-1) - 1;	/* skip bottom layer in path */
	for (blk = &path->blk[level]; level >= 0; blk--, level--) {
		ASSERT(blk->bp != NULL);
		node = blk->bp->b_addr;
		ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
			blk->index++;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		} else if (!forward && (blk->index > 0)) {
			blk->index--;
			blkno = be32_to_cpu(node->btree[blk->index].before);
			break;
		}
	}
	if (level < 0) {
		*result = XFS_ERROR(ENOENT);	/* we're out of our tree */
		ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return(0);
	}

	/*
	 * Roll down the edge of the subtree until we reach the
	 * same depth we were at originally.
	 */
	for (blk++, level++; level < path->active; blk++, level++) {
		/*
		 * Release the old block.
		 * (if it's dirty, trans won't actually let go)
		 */
		if (release)
			xfs_trans_brelse(args->trans, blk->bp);

		/*
		 * Read the next child block.
		 */
		blk->blkno = blkno;
		error = xfs_da_node_read(args->trans, args->dp, blkno, -1,
					&blk->bp, args->whichfork);
		if (error)
			return(error);
		ASSERT(blk->bp != NULL);
		info = blk->bp->b_addr;
		ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
		blk->magic = be16_to_cpu(info->magic);
		if (blk->magic == XFS_DA_NODE_MAGIC) {
			node = (xfs_da_intnode_t *)info;
			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
			if (forward)
				blk->index = 0;
			else
				blk->index = be16_to_cpu(node->hdr.count)-1;
			blkno = be32_to_cpu(node->btree[blk->index].before);
		} else {
			ASSERT(level == path->active-1);
			blk->index = 0;
			switch(blk->magic) {
			case XFS_ATTR_LEAF_MAGIC:
				blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
								      NULL);
				break;
			case XFS_DIR2_LEAFN_MAGIC:
				blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
								       NULL);
				break;
			default:
				ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
				       blk->magic == XFS_DIR2_LEAFN_MAGIC);
				break;
			}
		}
	}
	*result = 0;
	return(0);
}
/*========================================================================
 * Utility routines.
 *========================================================================*/

/*
 * Implement a simple hash on a character string.
 * Rotate the hash value by 7 bits, then XOR each character in.
 * This is implemented with some source-level loop unrolling.
 */
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash;

	/*
	 * Do four characters at a time as long as we can.
	 */
	for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
		hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
		       (name[3] << 0) ^ rol32(hash, 7 * 4);

	/*
	 * Now do the rest of the characters.
	 */
	switch (namelen) {
	case 3:
		return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
		       rol32(hash, 7 * 3);
	case 2:
		return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
	case 1:
		return (name[0] << 0) ^ rol32(hash, 7 * 1);
	default: /* case 0: */
		return hash;
	}
}
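/*
 * Example: for a four character name the loop above runs exactly once
 * (rol32(0, 28) == 0), so
 *	hash("abcd") = ('a' << 21) ^ ('b' << 14) ^ ('c' << 7) ^ 'd'
 * Longer names keep folding four bytes at a time into the rotated
 * running hash, and the switch handles the one to three byte tail.
 */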
enum xfs_dacmp
xfs_da_compname(
	struct xfs_da_args *args,
	const unsigned char *name,
	int		len)
{
	return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
					XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}

static xfs_dahash_t
xfs_default_hashname(
	struct xfs_name	*name)
{
	return xfs_da_hashname(name->name, name->len);
}

const struct xfs_nameops xfs_default_nameops = {
	.hashname	= xfs_default_hashname,
	.compname	= xfs_da_compname
};
int
xfs_da_grow_inode_int(
	struct xfs_da_args	*args,
	xfs_fileoff_t		*bno,
	int			count)
{
	struct xfs_trans	*tp = args->trans;
	struct xfs_inode	*dp = args->dp;
	int			w = args->whichfork;
	xfs_drfsbno_t		nblks = dp->i_d.di_nblocks;
	struct xfs_bmbt_irec	map, *mapp;
	int			nmap, error, got, i, mapi;

	/*
	 * Find a spot in the file space to put the new block.
	 */
	error = xfs_bmap_first_unused(tp, dp, count, bno, w);
	if (error)
		return error;

	/*
	 * Try mapping it in one filesystem block.
	 */
	nmap = 1;
	ASSERT(args->firstblock != NULL);
	error = xfs_bmapi_write(tp, dp, *bno, count,
			xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
			args->firstblock, args->total, &map, &nmap,
			args->flist);
	if (error)
		return error;

	ASSERT(nmap <= 1);
	if (nmap == 1) {
		mapp = &map;
		mapi = 1;
	} else if (nmap == 0 && count > 1) {
		xfs_fileoff_t		b;
		int			c;

		/*
		 * If we didn't get it and the block might work if fragmented,
		 * try without the CONTIG flag.  Loop until we get it all.
		 */
		mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
		for (b = *bno, mapi = 0; b < *bno + count; ) {
			nmap = MIN(XFS_BMAP_MAX_NMAP, count);
			c = (int)(*bno + count - b);
			error = xfs_bmapi_write(tp, dp, b, c,
					xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
					args->firstblock, args->total,
					&mapp[mapi], &nmap, args->flist);
			if (error)
				goto out_free_map;
			if (nmap < 1)
				break;
			mapi += nmap;
			b = mapp[mapi - 1].br_startoff +
			    mapp[mapi - 1].br_blockcount;
		}
	} else {
		mapi = 0;
		mapp = NULL;
	}

	/*
	 * Count the blocks we got, make sure it matches the total.
	 */
	for (i = 0, got = 0; i < mapi; i++)
		got += mapp[i].br_blockcount;
	if (got != count || mapp[0].br_startoff != *bno ||
	    mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
	    *bno + count) {
		error = XFS_ERROR(ENOSPC);
		goto out_free_map;
	}

	/* account for newly allocated blocks in reserved blocks total */
	args->total -= dp->i_d.di_nblocks - nblks;

out_free_map:
	if (mapp != &map)
		kmem_free(mapp);
	return error;
}
/*
 * Add a block to the btree ahead of the file.
 * Return the new block number to the caller.
 */
int
xfs_da_grow_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		*new_blkno)
{
	xfs_fileoff_t		bno;
	int			count;
	int			error;

	trace_xfs_da_grow_inode(args);

	if (args->whichfork == XFS_DATA_FORK) {
		bno = args->dp->i_mount->m_dirleafblk;
		count = args->dp->i_mount->m_dirblkfsbs;
	} else {
		bno = 0;
		count = 1;
	}

	error = xfs_da_grow_inode_int(args, &bno, count);
	if (!error)
		*new_blkno = (xfs_dablk_t)bno;
	return error;
}
/*
 * Ick.  We need to always be able to remove a btree block, even
 * if there's no space reservation because the filesystem is full.
 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
 * It swaps the target block with the last block in the file.  The
 * last block in the file can always be removed since it can't cause
 * a bmap btree split to do that.
 */
STATIC int
xfs_da_swap_lastblock(
	xfs_da_args_t	*args,
	xfs_dablk_t	*dead_blknop,
	struct xfs_buf	**dead_bufp)
{
	xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
	struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
	xfs_fileoff_t lastoff;
	xfs_inode_t *ip;
	xfs_trans_t *tp;
	xfs_mount_t *mp;
	int error, w, entno, level, dead_level;
	xfs_da_blkinfo_t *dead_info, *sib_info;
	xfs_da_intnode_t *par_node, *dead_node;
	xfs_dir2_leaf_t *dead_leaf2;
	xfs_dahash_t dead_hash;

	trace_xfs_da_swap_lastblock(args);

	dead_buf = *dead_bufp;
	dead_blkno = *dead_blknop;
	tp = args->trans;
	ip = args->dp;
	w = args->whichfork;
	ASSERT(w == XFS_DATA_FORK);
	mp = ip->i_mount;
	lastoff = mp->m_dirfreeblk;
	error = xfs_bmap_last_before(tp, ip, &lastoff, w);
	if (error)
		return error;
	if (unlikely(lastoff == 0)) {
		XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
				 mp);
		return XFS_ERROR(EFSCORRUPTED);
	}
	/*
	 * Read the last block in the btree space.
	 */
	last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
	error = xfs_da_node_read(tp, ip, last_blkno, -1, &last_buf, w);
	if (error)
		return error;
	/*
	 * Copy the last block into the dead buffer and log it.
	 */
	memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
	xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
	dead_info = dead_buf->b_addr;
	/*
	 * Get values from the moved block.
	 */
	if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
		dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
		dead_level = 0;
		dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
	} else {
		ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
		dead_node = (xfs_da_intnode_t *)dead_info;
		dead_level = be16_to_cpu(dead_node->hdr.level);
		dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
	}
	sib_buf = par_buf = NULL;
	/*
	 * If the moved block has a left sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->back))) {
		error = xfs_da_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->forw) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->forw = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
					sizeof(sib_info->forw)));
		sib_buf = NULL;
	}
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
	par_blkno = mp->m_dirleafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		if (unlikely(par_node->hdr.info.magic !=
		    cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		    (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		level = be16_to_cpu(par_node->hdr.level);
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		par_blkno = be32_to_cpu(par_node->btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (entno = 0;
		     entno < be16_to_cpu(par_node->hdr.count) &&
		     be32_to_cpu(par_node->btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < be16_to_cpu(par_node->hdr.count))
			break;
		par_blkno = be32_to_cpu(par_node->hdr.info.forw);
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
		error = xfs_da_node_read(tp, ip, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		if (unlikely(
		    be16_to_cpu(par_node->hdr.level) != level ||
		    par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = XFS_ERROR(EFSCORRUPTED);
			goto done;
		}
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	par_node->btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
				sizeof(par_node->btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}
/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	xfs_da_args_t	*args,
	xfs_dablk_t	dead_blkno,
	struct xfs_buf	*dead_buf)
{
	xfs_inode_t *dp;
	int done, error, w, count;
	xfs_trans_t *tp;
	xfs_mount_t *mp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	mp = dp->i_mount;
	if (w == XFS_DATA_FORK)
		count = mp->m_dirblkfsbs;
	else
		count = 1;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
				xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
				0, args->firstblock, args->flist,
				&done)) == ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
					&dead_buf)))
				break;
		} else {
			break;
		}
	}
	xfs_trans_binval(tp, dead_buf);
	return error;
}
/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	int		i;
	int		off;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
	return off == bno + count;
}
/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	unsigned int		*nmaps,
	struct xfs_bmbt_irec	*irecs,
	unsigned int		nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
		if (!map)
			return ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}
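/*
 * When more than one mapping is needed the allocated array replaces the
 * caller's on-stack single-entry map, so callers are expected to free
 * *mapp once it no longer points at their own xfs_buf_map.
 */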
/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;

	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}

	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
		if (unlikely(error == EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;

				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}
/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : XFS_ERROR(EIO);
	if (error) {
		xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	xfs_buf_iodone_t	verifier)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, verifier);
	if (error)
		goto out_free;

	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);

	/*
	 * This verification code will be moved to a CRC verification callback
	 * function so just leave it here unchanged until then.
	 */
	{
		xfs_dir2_data_hdr_t	*hdr = bp->b_addr;
		xfs_dir2_free_t		*free = bp->b_addr;
		xfs_da_blkinfo_t	*info = bp->b_addr;
		uint			magic, magic1;
		struct xfs_mount	*mp = dp->i_mount;

		magic = be16_to_cpu(info->magic);
		magic1 = be32_to_cpu(hdr->magic);
		if (unlikely(
		    XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
				   (magic != XFS_ATTR_LEAF_MAGIC) &&
				   (magic != XFS_DIR2_LEAF1_MAGIC) &&
				   (magic != XFS_DIR2_LEAFN_MAGIC) &&
				   (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
				   (magic1 != XFS_DIR2_DATA_MAGIC) &&
				   (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
				mp, XFS_ERRTAG_DA_READ_BUF,
				XFS_RANDOM_DA_READ_BUF))) {
			trace_xfs_da_btree_corrupt(bp, _RET_IP_);
			XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
					     XFS_ERRLEVEL_LOW, mp, info);
			error = XFS_ERROR(EFSCORRUPTED);
			xfs_trans_brelse(trans, bp);
			goto out_free;
		}
	}
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}
/*
 * Readahead the dir/attr block.
 */
xfs_daddr_t
xfs_da_reada_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	xfs_buf_iodone_t	verifier)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, NULL);

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	if (error)
		return -1;
	return mappedbno;
}
kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}
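/*
 * The DEBUG-only memset in xfs_da_state_free() zeroes the state before it
 * is returned to the zone, so stale references to a freed state are more
 * likely to fail fast instead of silently reusing old contents.
 */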