1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_mount.h"
27 #include "xfs_da_btree.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_dir2.h"
30 #include "xfs_dir2_format.h"
31 #include "xfs_dir2_priv.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_alloc.h"
36 #include "xfs_bmap.h"
37 #include "xfs_attr.h"
38 #include "xfs_attr_leaf.h"
39 #include "xfs_error.h"
40 #include "xfs_trace.h"
41
42 /*
43 * xfs_da_btree.c
44 *
45 * Routines to implement directories as Btrees of hashed names.
46 */
47
48 /*========================================================================
49 * Function prototypes for the kernel.
50 *========================================================================*/
51
52 /*
53 * Routines used for growing the Btree.
54 */
55 STATIC int xfs_da_root_split(xfs_da_state_t *state,
56 xfs_da_state_blk_t *existing_root,
57 xfs_da_state_blk_t *new_child);
58 STATIC int xfs_da_node_split(xfs_da_state_t *state,
59 xfs_da_state_blk_t *existing_blk,
60 xfs_da_state_blk_t *split_blk,
61 xfs_da_state_blk_t *blk_to_add,
62 int treelevel,
63 int *result);
64 STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
65 xfs_da_state_blk_t *node_blk_1,
66 xfs_da_state_blk_t *node_blk_2);
67 STATIC void xfs_da_node_add(xfs_da_state_t *state,
68 xfs_da_state_blk_t *old_node_blk,
69 xfs_da_state_blk_t *new_node_blk);
70
71 /*
72 * Routines used for shrinking the Btree.
73 */
74 STATIC int xfs_da_root_join(xfs_da_state_t *state,
75 xfs_da_state_blk_t *root_blk);
76 STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
77 STATIC void xfs_da_node_remove(xfs_da_state_t *state,
78 xfs_da_state_blk_t *drop_blk);
79 STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
80 xfs_da_state_blk_t *src_node_blk,
81 xfs_da_state_blk_t *dst_node_blk);
82
83 /*
84 * Utility routines.
85 */
86 STATIC uint xfs_da_node_lasthash(struct xfs_buf *bp, int *count);
87 STATIC int xfs_da_node_order(struct xfs_buf *node1_bp,
88 struct xfs_buf *node2_bp);
89 STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
90 xfs_da_state_blk_t *drop_blk,
91 xfs_da_state_blk_t *save_blk);
92 STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
93
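/*
 * Basic structural checks on a da node block: the node magic number, a
 * non-zero tree level and a non-zero entry count.  Anything else is
 * reported as corruption and the buffer is marked EFSCORRUPTED.
 */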
94 static void
95 xfs_da_node_verify(
96 struct xfs_buf *bp)
97 {
98 struct xfs_mount *mp = bp->b_target->bt_mount;
99 struct xfs_da_node_hdr *hdr = bp->b_addr;
100 int block_ok = 0;
101
102 block_ok = hdr->info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC);
103 block_ok = block_ok &&
104 be16_to_cpu(hdr->level) > 0 &&
105 be16_to_cpu(hdr->count) > 0;
106 if (!block_ok) {
107 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, hdr);
108 xfs_buf_ioerror(bp, EFSCORRUPTED);
109 }
110
111 }
112
113 static void
114 xfs_da_node_write_verify(
115 struct xfs_buf *bp)
116 {
117 xfs_da_node_verify(bp);
118 }
119
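/*
 * Read verification dispatches on the magic number: attr leaf and dir2
 * leafn blocks are handed off to their own read verifiers, node blocks
 * are checked here, and any other magic value is treated as corruption.
 * The node and corruption cases then attach the write verifier and
 * complete the buffer I/O.
 */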
120 static void
121 xfs_da_node_read_verify(
122 struct xfs_buf *bp)
123 {
124 struct xfs_mount *mp = bp->b_target->bt_mount;
125 struct xfs_da_blkinfo *info = bp->b_addr;
126
127 switch (be16_to_cpu(info->magic)) {
128 case XFS_DA_NODE_MAGIC:
129 xfs_da_node_verify(bp);
130 break;
131 case XFS_ATTR_LEAF_MAGIC:
132 xfs_attr_leaf_read_verify(bp);
133 return;
134 case XFS_DIR2_LEAFN_MAGIC:
135 xfs_dir2_leafn_read_verify(bp);
136 return;
137 default:
138 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
139 mp, info);
140 xfs_buf_ioerror(bp, EFSCORRUPTED);
141 break;
142 }
143
144 bp->b_pre_io = xfs_da_node_write_verify;
145 bp->b_iodone = NULL;
146 xfs_buf_ioend(bp, 0);
147 }
148
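/*
 * Read a da btree block into a buffer, running xfs_da_node_read_verify
 * against it when the read completes.
 */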
149 int
150 xfs_da_node_read(
151 struct xfs_trans *tp,
152 struct xfs_inode *dp,
153 xfs_dablk_t bno,
154 xfs_daddr_t mappedbno,
155 struct xfs_buf **bpp,
156 int which_fork)
157 {
158 return xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
159 which_fork, xfs_da_node_read_verify);
160 }
161
162 /*========================================================================
163 * Routines used for growing the Btree.
164 *========================================================================*/
165
166 /*
167 * Create the initial contents of an intermediate node.
168 */
169 int
170 xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
171 struct xfs_buf **bpp, int whichfork)
172 {
173 xfs_da_intnode_t *node;
174 struct xfs_buf *bp;
175 int error;
176 xfs_trans_t *tp;
177
178 trace_xfs_da_node_create(args);
179
180 tp = args->trans;
181 error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
182 if (error)
183 return(error);
184 ASSERT(bp != NULL);
185 node = bp->b_addr;
186 node->hdr.info.forw = 0;
187 node->hdr.info.back = 0;
188 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
189 node->hdr.info.pad = 0;
190 node->hdr.count = 0;
191 node->hdr.level = cpu_to_be16(level);
192
193 xfs_trans_log_buf(tp, bp,
194 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
195
196 bp->b_pre_io = xfs_da_node_write_verify;
197 *bpp = bp;
198 return(0);
199 }
200
201 /*
202 * Split a leaf node, rebalance, then possibly split
203 * intermediate nodes, rebalance, etc.
204 */
205 int /* error */
206 xfs_da_split(xfs_da_state_t *state)
207 {
208 xfs_da_state_blk_t *oldblk, *newblk, *addblk;
209 xfs_da_intnode_t *node;
210 struct xfs_buf *bp;
211 int max, action, error, i;
212
213 trace_xfs_da_split(state->args);
214
215 /*
216 * Walk back up the tree splitting/inserting/adjusting as necessary.
217 * If we need to insert and there isn't room, split the node, then
218 * decide which fragment to insert the new block from below into.
219 * Note that we may split the root this way, but we need more fixup.
220 */
221 max = state->path.active - 1;
222 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
223 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
224 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
225
226 addblk = &state->path.blk[max]; /* initial dummy value */
227 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
228 oldblk = &state->path.blk[i];
229 newblk = &state->altpath.blk[i];
230
231 /*
232 * If a leaf node then
233 * Allocate a new leaf node, then rebalance across them.
234 * else if an intermediate node then
235 * We split on the last layer, must we split the node?
236 */
237 switch (oldblk->magic) {
238 case XFS_ATTR_LEAF_MAGIC:
239 error = xfs_attr_leaf_split(state, oldblk, newblk);
240 if ((error != 0) && (error != ENOSPC)) {
241 return(error); /* GROT: attr is inconsistent */
242 }
243 if (!error) {
244 addblk = newblk;
245 break;
246 }
247 /*
248 * Entry wouldn't fit, split the leaf again.
249 */
250 state->extravalid = 1;
251 if (state->inleaf) {
252 state->extraafter = 0; /* before newblk */
253 trace_xfs_attr_leaf_split_before(state->args);
254 error = xfs_attr_leaf_split(state, oldblk,
255 &state->extrablk);
256 } else {
257 state->extraafter = 1; /* after newblk */
258 trace_xfs_attr_leaf_split_after(state->args);
259 error = xfs_attr_leaf_split(state, newblk,
260 &state->extrablk);
261 }
262 if (error)
263 return(error); /* GROT: attr inconsistent */
264 addblk = newblk;
265 break;
266 case XFS_DIR2_LEAFN_MAGIC:
267 error = xfs_dir2_leafn_split(state, oldblk, newblk);
268 if (error)
269 return error;
270 addblk = newblk;
271 break;
272 case XFS_DA_NODE_MAGIC:
273 error = xfs_da_node_split(state, oldblk, newblk, addblk,
274 max - i, &action);
275 addblk->bp = NULL;
276 if (error)
277 return(error); /* GROT: dir is inconsistent */
278 /*
279 * Record the newly split block for the next time thru?
280 */
281 if (action)
282 addblk = newblk;
283 else
284 addblk = NULL;
285 break;
286 }
287
288 /*
289 * Update the btree to show the new hashval for this child.
290 */
291 xfs_da_fixhashpath(state, &state->path);
292 }
293 if (!addblk)
294 return(0);
295
296 /*
297 * Split the root node.
298 */
299 ASSERT(state->path.active == 0);
300 oldblk = &state->path.blk[0];
301 error = xfs_da_root_split(state, oldblk, addblk);
302 if (error) {
303 addblk->bp = NULL;
304 return(error); /* GROT: dir is inconsistent */
305 }
306
307 /*
308 * Update pointers to the node which used to be block 0 and
309 * just got bumped because of the addition of a new root node.
310 * There might be three blocks involved if a double split occurred,
311 * and the original block 0 could be at any position in the list.
312 */
313
314 node = oldblk->bp->b_addr;
315 if (node->hdr.info.forw) {
316 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
317 bp = addblk->bp;
318 } else {
319 ASSERT(state->extravalid);
320 bp = state->extrablk.bp;
321 }
322 node = bp->b_addr;
323 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
324 xfs_trans_log_buf(state->args->trans, bp,
325 XFS_DA_LOGRANGE(node, &node->hdr.info,
326 sizeof(node->hdr.info)));
327 }
328 node = oldblk->bp->b_addr;
329 if (node->hdr.info.back) {
330 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
331 bp = addblk->bp;
332 } else {
333 ASSERT(state->extravalid);
334 bp = state->extrablk.bp;
335 }
336 node = bp->b_addr;
337 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
338 xfs_trans_log_buf(state->args->trans, bp,
339 XFS_DA_LOGRANGE(node, &node->hdr.info,
340 sizeof(node->hdr.info)));
341 }
342 addblk->bp = NULL;
343 return(0);
344 }
345
346 /*
347 * Split the root. We have to create a new root and point to the two
348 * parts (the split old root) that we just created. Copy block zero to
349 * the EOF, extending the inode in the process.
350 */
351 STATIC int /* error */
352 xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
353 xfs_da_state_blk_t *blk2)
354 {
355 xfs_da_intnode_t *node, *oldroot;
356 xfs_da_args_t *args;
357 xfs_dablk_t blkno;
358 struct xfs_buf *bp;
359 int error, size;
360 xfs_inode_t *dp;
361 xfs_trans_t *tp;
362 xfs_mount_t *mp;
363 xfs_dir2_leaf_t *leaf;
364
365 trace_xfs_da_root_split(state->args);
366
367 /*
368 * Copy the existing (incorrect) block from the root node position
369 * to a free space somewhere.
370 */
371 args = state->args;
372 ASSERT(args != NULL);
373 error = xfs_da_grow_inode(args, &blkno);
374 if (error)
375 return(error);
376 dp = args->dp;
377 tp = args->trans;
378 mp = state->mp;
379 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
380 if (error)
381 return(error);
382 ASSERT(bp != NULL);
383 node = bp->b_addr;
384 oldroot = blk1->bp->b_addr;
385 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
386 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
387 (char *)oldroot);
388 } else {
389 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
390 leaf = (xfs_dir2_leaf_t *)oldroot;
391 size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
392 (char *)leaf);
393 }
394 memcpy(node, oldroot, size);
395 xfs_trans_log_buf(tp, bp, 0, size - 1);
396
397 bp->b_pre_io = blk1->bp->b_pre_io;
398 blk1->bp = bp;
399 blk1->blkno = blkno;
400
401 /*
402 * Set up the new root node.
403 */
404 error = xfs_da_node_create(args,
405 (args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
406 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
407 if (error)
408 return(error);
409 node = bp->b_addr;
410 node->btree[0].hashval = cpu_to_be32(blk1->hashval);
411 node->btree[0].before = cpu_to_be32(blk1->blkno);
412 node->btree[1].hashval = cpu_to_be32(blk2->hashval);
413 node->btree[1].before = cpu_to_be32(blk2->blkno);
414 node->hdr.count = cpu_to_be16(2);
415
416 #ifdef DEBUG
417 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
418 ASSERT(blk1->blkno >= mp->m_dirleafblk &&
419 blk1->blkno < mp->m_dirfreeblk);
420 ASSERT(blk2->blkno >= mp->m_dirleafblk &&
421 blk2->blkno < mp->m_dirfreeblk);
422 }
423 #endif
424
425 /* Header is already logged by xfs_da_node_create */
426 xfs_trans_log_buf(tp, bp,
427 XFS_DA_LOGRANGE(node, node->btree,
428 sizeof(xfs_da_node_entry_t) * 2));
429
430 return(0);
431 }
432
433 /*
434 * Split the node, rebalance, then add the new entry.
435 */
436 STATIC int /* error */
437 xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
438 xfs_da_state_blk_t *newblk,
439 xfs_da_state_blk_t *addblk,
440 int treelevel, int *result)
441 {
442 xfs_da_intnode_t *node;
443 xfs_dablk_t blkno;
444 int newcount, error;
445 int useextra;
446
447 trace_xfs_da_node_split(state->args);
448
449 node = oldblk->bp->b_addr;
450 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
451
452 /*
453 * With V2 dirs the extra block is data or freespace.
454 */
455 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
456 newcount = 1 + useextra;
457 /*
458 * Do we have to split the node?
459 */
460 if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
461 /*
462 * Allocate a new node, add to the doubly linked chain of
463 * nodes, then move some of our excess entries into it.
464 */
465 error = xfs_da_grow_inode(state->args, &blkno);
466 if (error)
467 return(error); /* GROT: dir is inconsistent */
468
469 error = xfs_da_node_create(state->args, blkno, treelevel,
470 &newblk->bp, state->args->whichfork);
471 if (error)
472 return(error); /* GROT: dir is inconsistent */
473 newblk->blkno = blkno;
474 newblk->magic = XFS_DA_NODE_MAGIC;
475 xfs_da_node_rebalance(state, oldblk, newblk);
476 error = xfs_da_blk_link(state, oldblk, newblk);
477 if (error)
478 return(error);
479 *result = 1;
480 } else {
481 *result = 0;
482 }
483
484 /*
485 * Insert the new entry(s) into the correct block
486 * (updating last hashval in the process).
487 *
488 * xfs_da_node_add() inserts BEFORE the given index,
489 * and as a result of using node_lookup_int() we always
490 * point to a valid entry (not after one), but a split
491 * operation always results in a new block whose hashvals
492 * FOLLOW the current block.
493 *
494 * If we had double-split op below us, then add the extra block too.
495 */
496 node = oldblk->bp->b_addr;
497 if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
498 oldblk->index++;
499 xfs_da_node_add(state, oldblk, addblk);
500 if (useextra) {
501 if (state->extraafter)
502 oldblk->index++;
503 xfs_da_node_add(state, oldblk, &state->extrablk);
504 state->extravalid = 0;
505 }
506 } else {
507 newblk->index++;
508 xfs_da_node_add(state, newblk, addblk);
509 if (useextra) {
510 if (state->extraafter)
511 newblk->index++;
512 xfs_da_node_add(state, newblk, &state->extrablk);
513 state->extravalid = 0;
514 }
515 }
516
517 return(0);
518 }
519
520 /*
521 * Balance the btree elements between two intermediate nodes,
522 * usually one full and one empty.
523 *
524 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
525 */
526 STATIC void
527 xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
528 xfs_da_state_blk_t *blk2)
529 {
530 xfs_da_intnode_t *node1, *node2, *tmpnode;
531 xfs_da_node_entry_t *btree_s, *btree_d;
532 int count, tmp;
533 xfs_trans_t *tp;
534
535 trace_xfs_da_node_rebalance(state->args);
536
537 node1 = blk1->bp->b_addr;
538 node2 = blk2->bp->b_addr;
539 /*
540 * Figure out how many entries need to move, and in which direction.
541 * Swap the nodes around if that makes it simpler.
542 */
543 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
544 ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
545 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
546 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
547 tmpnode = node1;
548 node1 = node2;
549 node2 = tmpnode;
550 }
551 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
552 ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
553 count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
554 if (count == 0)
555 return;
556 tp = state->args->trans;
557 /*
558 * Two cases: high-to-low and low-to-high.
559 */
560 if (count > 0) {
561 /*
562 * Move elements in node2 up to make a hole.
563 */
564 if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
565 tmp *= (uint)sizeof(xfs_da_node_entry_t);
566 btree_s = &node2->btree[0];
567 btree_d = &node2->btree[count];
568 memmove(btree_d, btree_s, tmp);
569 }
570
571 /*
572 * Move the req'd B-tree elements from high in node1 to
573 * low in node2.
574 */
575 be16_add_cpu(&node2->hdr.count, count);
576 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
577 btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
578 btree_d = &node2->btree[0];
579 memcpy(btree_d, btree_s, tmp);
580 be16_add_cpu(&node1->hdr.count, -count);
581 } else {
582 /*
583 * Move the req'd B-tree elements from low in node2 to
584 * high in node1.
585 */
586 count = -count;
587 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
588 btree_s = &node2->btree[0];
589 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
590 memcpy(btree_d, btree_s, tmp);
591 be16_add_cpu(&node1->hdr.count, count);
592 xfs_trans_log_buf(tp, blk1->bp,
593 XFS_DA_LOGRANGE(node1, btree_d, tmp));
594
595 /*
596 * Move elements in node2 down to fill the hole.
597 */
598 tmp = be16_to_cpu(node2->hdr.count) - count;
599 tmp *= (uint)sizeof(xfs_da_node_entry_t);
600 btree_s = &node2->btree[count];
601 btree_d = &node2->btree[0];
602 memmove(btree_d, btree_s, tmp);
603 be16_add_cpu(&node2->hdr.count, -count);
604 }
605
606 /*
607 * Log header of node 1 and all current bits of node 2.
608 */
609 xfs_trans_log_buf(tp, blk1->bp,
610 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
611 xfs_trans_log_buf(tp, blk2->bp,
612 XFS_DA_LOGRANGE(node2, &node2->hdr,
613 sizeof(node2->hdr) +
614 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
615
616 /*
617 * Record the last hashval from each block for upward propagation.
618 * (note: don't use the swapped node pointers)
619 */
620 node1 = blk1->bp->b_addr;
621 node2 = blk2->bp->b_addr;
622 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
623 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
624
625 /*
626 * Adjust the expected index for insertion.
627 */
628 if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
629 blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
630 blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */
631 }
632 }
633
634 /*
635 * Add a new entry to an intermediate node.
636 */
637 STATIC void
638 xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
639 xfs_da_state_blk_t *newblk)
640 {
641 xfs_da_intnode_t *node;
642 xfs_da_node_entry_t *btree;
643 int tmp;
644
645 trace_xfs_da_node_add(state->args);
646
647 node = oldblk->bp->b_addr;
648 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
649 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
650 ASSERT(newblk->blkno != 0);
651 if (state->args->whichfork == XFS_DATA_FORK)
652 ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
653 newblk->blkno < state->mp->m_dirfreeblk);
654
655 /*
656 * We may need to make some room before we insert the new node.
657 */
658 tmp = 0;
659 btree = &node->btree[ oldblk->index ];
660 if (oldblk->index < be16_to_cpu(node->hdr.count)) {
661 tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
662 memmove(btree + 1, btree, tmp);
663 }
664 btree->hashval = cpu_to_be32(newblk->hashval);
665 btree->before = cpu_to_be32(newblk->blkno);
666 xfs_trans_log_buf(state->args->trans, oldblk->bp,
667 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
668 be16_add_cpu(&node->hdr.count, 1);
669 xfs_trans_log_buf(state->args->trans, oldblk->bp,
670 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
671
672 /*
673 * Copy the last hash value from the oldblk to propagate upwards.
674 */
675 oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
676 }
677
678 /*========================================================================
679 * Routines used for shrinking the Btree.
680 *========================================================================*/
681
682 /*
683 * Deallocate an empty leaf node, remove it from its parent,
684 * possibly deallocating that block, etc...
685 */
686 int
687 xfs_da_join(xfs_da_state_t *state)
688 {
689 xfs_da_state_blk_t *drop_blk, *save_blk;
690 int action, error;
691
692 trace_xfs_da_join(state->args);
693
694 action = 0;
695 drop_blk = &state->path.blk[ state->path.active-1 ];
696 save_blk = &state->altpath.blk[ state->path.active-1 ];
697 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
698 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
699 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
700
701 /*
702 * Walk back up the tree joining/deallocating as necessary.
703 * When we stop dropping blocks, break out.
704 */
705 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
706 state->path.active--) {
707 /*
708 * See if we can combine the block with a neighbor.
709 * (action == 0) => no options, just leave
710 * (action == 1) => coalesce, then unlink
711 * (action == 2) => block empty, unlink it
712 */
713 switch (drop_blk->magic) {
714 case XFS_ATTR_LEAF_MAGIC:
715 error = xfs_attr_leaf_toosmall(state, &action);
716 if (error)
717 return(error);
718 if (action == 0)
719 return(0);
720 xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
721 break;
722 case XFS_DIR2_LEAFN_MAGIC:
723 error = xfs_dir2_leafn_toosmall(state, &action);
724 if (error)
725 return error;
726 if (action == 0)
727 return 0;
728 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
729 break;
730 case XFS_DA_NODE_MAGIC:
731 /*
732 * Remove the offending node, fixup hashvals,
733 * check for a toosmall neighbor.
734 */
735 xfs_da_node_remove(state, drop_blk);
736 xfs_da_fixhashpath(state, &state->path);
737 error = xfs_da_node_toosmall(state, &action);
738 if (error)
739 return(error);
740 if (action == 0)
741 return 0;
742 xfs_da_node_unbalance(state, drop_blk, save_blk);
743 break;
744 }
745 xfs_da_fixhashpath(state, &state->altpath);
746 error = xfs_da_blk_unlink(state, drop_blk, save_blk);
747 xfs_da_state_kill_altpath(state);
748 if (error)
749 return(error);
750 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
751 drop_blk->bp);
752 drop_blk->bp = NULL;
753 if (error)
754 return(error);
755 }
756 /*
757 * We joined all the way to the top. If it turns out that
758 * we only have one entry in the root, make the child block
759 * the new root.
760 */
761 xfs_da_node_remove(state, drop_blk);
762 xfs_da_fixhashpath(state, &state->path);
763 error = xfs_da_root_join(state, &state->path.blk[0]);
764 return(error);
765 }
766
767 #ifdef DEBUG
768 static void
769 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
770 {
771 __be16 magic = blkinfo->magic;
772
773 if (level == 1) {
774 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
775 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
776 } else
777 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
778 ASSERT(!blkinfo->forw);
779 ASSERT(!blkinfo->back);
780 }
781 #else /* !DEBUG */
782 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
783 #endif /* !DEBUG */
784
785 /*
786 * We have only one entry in the root. Copy the only remaining child of
787 * the old root to block 0 as the new root node.
788 */
789 STATIC int
790 xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
791 {
792 xfs_da_intnode_t *oldroot;
793 xfs_da_args_t *args;
794 xfs_dablk_t child;
795 struct xfs_buf *bp;
796 int error;
797
798 trace_xfs_da_root_join(state->args);
799
800 args = state->args;
801 ASSERT(args != NULL);
802 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
803 oldroot = root_blk->bp->b_addr;
804 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
805 ASSERT(!oldroot->hdr.info.forw);
806 ASSERT(!oldroot->hdr.info.back);
807
808 /*
809 * If the root has more than one child, then don't do anything.
810 */
811 if (be16_to_cpu(oldroot->hdr.count) > 1)
812 return(0);
813
814 /*
815 * Read in the (only) child block, then copy those bytes into
816 * the root block's buffer and free the original child block.
817 */
818 child = be32_to_cpu(oldroot->btree[0].before);
819 ASSERT(child != 0);
820 error = xfs_da_node_read(args->trans, args->dp, child, -1, &bp,
821 args->whichfork);
822 if (error)
823 return(error);
824 ASSERT(bp != NULL);
825 xfs_da_blkinfo_onlychild_validate(bp->b_addr,
826 be16_to_cpu(oldroot->hdr.level));
827
828 /*
829 * This could be copying a leaf back into the root block in the case
830 * where only a single leaf block is left in the tree. Hence we have
831 * to update the pre_io pointer as well to match the buffer type change
832 * that could occur.
833 */
834 memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
835 root_blk->bp->b_pre_io = bp->b_pre_io;
836 xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
837 error = xfs_da_shrink_inode(args, child, bp);
838 return(error);
839 }
840
841 /*
842 * Check a node block and its neighbors to see if the block should be
843 * collapsed into one or the other neighbor. Always keep the block
844 * with the smaller block number.
845 * If the current block is over 50% full, don't try to join it, return 0.
846 * If the block is empty, fill in the state structure and return 2.
847 * If it can be collapsed, fill in the state structure and return 1.
848 * If nothing can be done, return 0.
849 */
850 STATIC int
851 xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
852 {
853 xfs_da_intnode_t *node;
854 xfs_da_state_blk_t *blk;
855 xfs_da_blkinfo_t *info;
856 int count, forward, error, retval, i;
857 xfs_dablk_t blkno;
858 struct xfs_buf *bp;
859
860 trace_xfs_da_node_toosmall(state->args);
861
862 /*
863 * Check for the degenerate case of the block being over 50% full.
864 * If so, it's not worth even looking to see if we might be able
865 * to coalesce with a sibling.
866 */
867 blk = &state->path.blk[ state->path.active-1 ];
868 info = blk->bp->b_addr;
869 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
870 node = (xfs_da_intnode_t *)info;
871 count = be16_to_cpu(node->hdr.count);
872 if (count > (state->node_ents >> 1)) {
873 *action = 0; /* blk over 50%, don't try to join */
874 return(0); /* blk over 50%, don't try to join */
875 }
876
877 /*
878 * Check for the degenerate case of the block being empty.
879 * If the block is empty, we'll simply delete it, no need to
880 * coalesce it with a sibling block. We choose (arbitrarily)
881 * to merge with the forward block unless it is NULL.
882 */
883 if (count == 0) {
884 /*
885 * Make altpath point to the block we want to keep and
886 * path point to the block we want to drop (this one).
887 */
888 forward = (info->forw != 0);
889 memcpy(&state->altpath, &state->path, sizeof(state->path));
890 error = xfs_da_path_shift(state, &state->altpath, forward,
891 0, &retval);
892 if (error)
893 return(error);
894 if (retval) {
895 *action = 0;
896 } else {
897 *action = 2;
898 }
899 return(0);
900 }
901
902 /*
903 * Examine each sibling block to see if we can coalesce with
904 * at least 25% free space to spare. We need to figure out
905 * whether to merge with the forward or the backward block.
906 * We prefer coalescing with the lower numbered sibling so as
907 * to shrink a directory over time.
908 */
909 /* start with smaller blk num */
910 forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
911 for (i = 0; i < 2; forward = !forward, i++) {
912 if (forward)
913 blkno = be32_to_cpu(info->forw);
914 else
915 blkno = be32_to_cpu(info->back);
916 if (blkno == 0)
917 continue;
918 error = xfs_da_node_read(state->args->trans, state->args->dp,
919 blkno, -1, &bp, state->args->whichfork);
920 if (error)
921 return(error);
922 ASSERT(bp != NULL);
923
924 node = (xfs_da_intnode_t *)info;
925 count = state->node_ents;
926 count -= state->node_ents >> 2;
927 count -= be16_to_cpu(node->hdr.count);
928 node = bp->b_addr;
929 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
930 count -= be16_to_cpu(node->hdr.count);
931 xfs_trans_brelse(state->args->trans, bp);
932 if (count >= 0)
933 break; /* fits with at least 25% to spare */
934 }
935 if (i >= 2) {
936 *action = 0;
937 return(0);
938 }
939
940 /*
941 * Make altpath point to the block we want to keep (the lower
942 * numbered block) and path point to the block we want to drop.
943 */
944 memcpy(&state->altpath, &state->path, sizeof(state->path));
945 if (blkno < blk->blkno) {
946 error = xfs_da_path_shift(state, &state->altpath, forward,
947 0, &retval);
948 if (error) {
949 return(error);
950 }
951 if (retval) {
952 *action = 0;
953 return(0);
954 }
955 } else {
956 error = xfs_da_path_shift(state, &state->path, forward,
957 0, &retval);
958 if (error) {
959 return(error);
960 }
961 if (retval) {
962 *action = 0;
963 return(0);
964 }
965 }
966 *action = 1;
967 return(0);
968 }
969
970 /*
971 * Walk back up the tree adjusting hash values as necessary;
972 * when we stop making changes, return.
973 */
974 void
975 xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
976 {
977 xfs_da_state_blk_t *blk;
978 xfs_da_intnode_t *node;
979 xfs_da_node_entry_t *btree;
980 xfs_dahash_t lasthash=0;
981 int level, count;
982
983 trace_xfs_da_fixhashpath(state->args);
984
985 level = path->active-1;
986 blk = &path->blk[ level ];
987 switch (blk->magic) {
988 case XFS_ATTR_LEAF_MAGIC:
989 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
990 if (count == 0)
991 return;
992 break;
993 case XFS_DIR2_LEAFN_MAGIC:
994 lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
995 if (count == 0)
996 return;
997 break;
998 case XFS_DA_NODE_MAGIC:
999 lasthash = xfs_da_node_lasthash(blk->bp, &count);
1000 if (count == 0)
1001 return;
1002 break;
1003 }
1004 for (blk--, level--; level >= 0; blk--, level--) {
1005 node = blk->bp->b_addr;
1006 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1007 btree = &node->btree[ blk->index ];
1008 if (be32_to_cpu(btree->hashval) == lasthash)
1009 break;
1010 blk->hashval = lasthash;
1011 btree->hashval = cpu_to_be32(lasthash);
1012 xfs_trans_log_buf(state->args->trans, blk->bp,
1013 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
1014
1015 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1016 }
1017 }
1018
1019 /*
1020 * Remove an entry from an intermediate node.
1021 */
1022 STATIC void
1023 xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
1024 {
1025 xfs_da_intnode_t *node;
1026 xfs_da_node_entry_t *btree;
1027 int tmp;
1028
1029 trace_xfs_da_node_remove(state->args);
1030
1031 node = drop_blk->bp->b_addr;
1032 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
1033 ASSERT(drop_blk->index >= 0);
1034
1035 /*
1036 * Copy over the offending entry, or just zero it out.
1037 */
1038 btree = &node->btree[drop_blk->index];
1039 if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
1040 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
1041 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1042 memmove(btree, btree + 1, tmp);
1043 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1044 XFS_DA_LOGRANGE(node, btree, tmp));
1045 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
1046 }
1047 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
1048 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1049 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
1050 be16_add_cpu(&node->hdr.count, -1);
1051 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1052 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
1053
1054 /*
1055 * Copy the last hash value from the block to propagate upwards.
1056 */
1057 btree--;
1058 drop_blk->hashval = be32_to_cpu(btree->hashval);
1059 }
1060
1061 /*
1062 * Unbalance the btree elements between two intermediate nodes,
1063 * move all Btree elements from one node into another.
1064 */
1065 STATIC void
1066 xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1067 xfs_da_state_blk_t *save_blk)
1068 {
1069 xfs_da_intnode_t *drop_node, *save_node;
1070 xfs_da_node_entry_t *btree;
1071 int tmp;
1072 xfs_trans_t *tp;
1073
1074 trace_xfs_da_node_unbalance(state->args);
1075
1076 drop_node = drop_blk->bp->b_addr;
1077 save_node = save_blk->bp->b_addr;
1078 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1079 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1080 tp = state->args->trans;
1081
1082 /*
1083 * If the dying block has lower hashvals, then move all the
1084 * elements in the remaining block up to make a hole.
1085 */
1086 if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
1087 (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
1088 be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
1089 {
1090 btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
1091 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1092 memmove(btree, &save_node->btree[0], tmp);
1093 btree = &save_node->btree[0];
1094 xfs_trans_log_buf(tp, save_blk->bp,
1095 XFS_DA_LOGRANGE(save_node, btree,
1096 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1097 sizeof(xfs_da_node_entry_t)));
1098 } else {
1099 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1100 xfs_trans_log_buf(tp, save_blk->bp,
1101 XFS_DA_LOGRANGE(save_node, btree,
1102 be16_to_cpu(drop_node->hdr.count) *
1103 sizeof(xfs_da_node_entry_t)));
1104 }
1105
1106 /*
1107 * Move all the B-tree elements from drop_blk to save_blk.
1108 */
1109 tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1110 memcpy(btree, &drop_node->btree[0], tmp);
1111 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1112
1113 xfs_trans_log_buf(tp, save_blk->bp,
1114 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1115 sizeof(save_node->hdr)));
1116
1117 /*
1118 * Save the last hashval in the remaining block for upward propagation.
1119 */
1120 save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1121 }
1122
1123 /*========================================================================
1124 * Routines used for finding things in the Btree.
1125 *========================================================================*/
1126
1127 /*
1128 * Walk down the Btree looking for a particular filename, filling
1129 * in the state structure as we go.
1130 *
1131 * We will set the state structure to point to each of the elements
1132 * in each of the nodes where either the hashval is or should be.
1133 *
1134 * We support duplicate hashvals, so for each entry in the current
1135 * node that could contain the desired hashval, descend. This is a
1136 * pruned depth-first tree search.
1137 */
1138 int /* error */
1139 xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1140 {
1141 xfs_da_state_blk_t *blk;
1142 xfs_da_blkinfo_t *curr;
1143 xfs_da_intnode_t *node;
1144 xfs_da_node_entry_t *btree;
1145 xfs_dablk_t blkno;
1146 int probe, span, max, error, retval;
1147 xfs_dahash_t hashval, btreehashval;
1148 xfs_da_args_t *args;
1149
1150 args = state->args;
1151
1152 /*
1153 * Descend thru the B-tree searching each level for the right
1154 * node to use, until the right hashval is found.
1155 */
1156 blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1157 for (blk = &state->path.blk[0], state->path.active = 1;
1158 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1159 blk++, state->path.active++) {
1160 /*
1161 * Read the next node down in the tree.
1162 */
1163 blk->blkno = blkno;
1164 error = xfs_da_node_read(args->trans, args->dp, blkno,
1165 -1, &blk->bp, args->whichfork);
1166 if (error) {
1167 blk->blkno = 0;
1168 state->path.active--;
1169 return(error);
1170 }
1171 curr = blk->bp->b_addr;
1172 blk->magic = be16_to_cpu(curr->magic);
1173 ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1174 blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1175 blk->magic == XFS_ATTR_LEAF_MAGIC);
1176
1177 /*
1178 * Search an intermediate node for a match.
1179 */
1180 if (blk->magic == XFS_DA_NODE_MAGIC) {
1181 node = blk->bp->b_addr;
1182 max = be16_to_cpu(node->hdr.count);
1183 blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1184
1185 /*
1186 * Binary search. (note: small blocks will skip loop)
1187 */
1188 probe = span = max / 2;
1189 hashval = args->hashval;
1190 for (btree = &node->btree[probe]; span > 4;
1191 btree = &node->btree[probe]) {
1192 span /= 2;
1193 btreehashval = be32_to_cpu(btree->hashval);
1194 if (btreehashval < hashval)
1195 probe += span;
1196 else if (btreehashval > hashval)
1197 probe -= span;
1198 else
1199 break;
1200 }
1201 ASSERT((probe >= 0) && (probe < max));
1202 ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1203
1204 /*
1205 * Since we may have duplicate hashvals, find the first
1206 * matching hashval in the node.
1207 */
1208 while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1209 btree--;
1210 probe--;
1211 }
1212 while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1213 btree++;
1214 probe++;
1215 }
1216
1217 /*
1218 * Pick the right block to descend on.
1219 */
1220 if (probe == max) {
1221 blk->index = max-1;
1222 blkno = be32_to_cpu(node->btree[max-1].before);
1223 } else {
1224 blk->index = probe;
1225 blkno = be32_to_cpu(btree->before);
1226 }
1227 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1228 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1229 break;
1230 } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1231 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1232 break;
1233 }
1234 }
1235
1236 /*
1237 * A leaf block that ends in the hashval that we are interested in
1238 * (final hashval == search hashval) means that the next block may
1239 * contain more entries with the same hashval, shift upward to the
1240 * next leaf and keep searching.
1241 */
1242 for (;;) {
1243 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1244 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1245 &blk->index, state);
1246 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1247 retval = xfs_attr_leaf_lookup_int(blk->bp, args);
1248 blk->index = args->index;
1249 args->blkno = blk->blkno;
1250 } else {
1251 ASSERT(0);
1252 return XFS_ERROR(EFSCORRUPTED);
1253 }
1254 if (((retval == ENOENT) || (retval == ENOATTR)) &&
1255 (blk->hashval == args->hashval)) {
1256 error = xfs_da_path_shift(state, &state->path, 1, 1,
1257 &retval);
1258 if (error)
1259 return(error);
1260 if (retval == 0) {
1261 continue;
1262 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1263 /* path_shift() gives ENOENT */
1264 retval = XFS_ERROR(ENOATTR);
1265 }
1266 }
1267 break;
1268 }
1269 *result = retval;
1270 return(0);
1271 }
1272
1273 /*========================================================================
1274 * Utility routines.
1275 *========================================================================*/
1276
1277 /*
1278 * Link a new block into a doubly linked list of blocks (of whatever type).
1279 */
1280 int /* error */
1281 xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1282 xfs_da_state_blk_t *new_blk)
1283 {
1284 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1285 xfs_da_args_t *args;
1286 int before=0, error;
1287 struct xfs_buf *bp;
1288
1289 /*
1290 * Set up environment.
1291 */
1292 args = state->args;
1293 ASSERT(args != NULL);
1294 old_info = old_blk->bp->b_addr;
1295 new_info = new_blk->bp->b_addr;
1296 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1297 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1298 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1299 ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
1300 ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
1301 ASSERT(old_blk->magic == new_blk->magic);
1302
1303 switch (old_blk->magic) {
1304 case XFS_ATTR_LEAF_MAGIC:
1305 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1306 break;
1307 case XFS_DIR2_LEAFN_MAGIC:
1308 before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1309 break;
1310 case XFS_DA_NODE_MAGIC:
1311 before = xfs_da_node_order(old_blk->bp, new_blk->bp);
1312 break;
1313 }
1314
1315 /*
1316 * Link blocks in appropriate order.
1317 */
1318 if (before) {
1319 /*
1320 * Link new block in before existing block.
1321 */
1322 trace_xfs_da_link_before(args);
1323 new_info->forw = cpu_to_be32(old_blk->blkno);
1324 new_info->back = old_info->back;
1325 if (old_info->back) {
1326 error = xfs_da_node_read(args->trans, args->dp,
1327 be32_to_cpu(old_info->back),
1328 -1, &bp, args->whichfork);
1329 if (error)
1330 return(error);
1331 ASSERT(bp != NULL);
1332 tmp_info = bp->b_addr;
1333 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1334 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1335 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1336 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1337 }
1338 old_info->back = cpu_to_be32(new_blk->blkno);
1339 } else {
1340 /*
1341 * Link new block in after existing block.
1342 */
1343 trace_xfs_da_link_after(args);
1344 new_info->forw = old_info->forw;
1345 new_info->back = cpu_to_be32(old_blk->blkno);
1346 if (old_info->forw) {
1347 error = xfs_da_node_read(args->trans, args->dp,
1348 be32_to_cpu(old_info->forw),
1349 -1, &bp, args->whichfork);
1350 if (error)
1351 return(error);
1352 ASSERT(bp != NULL);
1353 tmp_info = bp->b_addr;
1354 ASSERT(tmp_info->magic == old_info->magic);
1355 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1356 tmp_info->back = cpu_to_be32(new_blk->blkno);
1357 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1358 }
1359 old_info->forw = cpu_to_be32(new_blk->blkno);
1360 }
1361
1362 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1363 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1364 return(0);
1365 }
1366
1367 /*
1368 * Compare two intermediate nodes for "order".
1369 */
1370 STATIC int
1371 xfs_da_node_order(
1372 struct xfs_buf *node1_bp,
1373 struct xfs_buf *node2_bp)
1374 {
1375 xfs_da_intnode_t *node1, *node2;
1376
1377 node1 = node1_bp->b_addr;
1378 node2 = node2_bp->b_addr;
1379 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
1380 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1381 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
1382 ((be32_to_cpu(node2->btree[0].hashval) <
1383 be32_to_cpu(node1->btree[0].hashval)) ||
1384 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1385 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1386 return(1);
1387 }
1388 return(0);
1389 }
1390
1391 /*
1392 * Pick up the last hashvalue from an intermediate node.
1393 */
1394 STATIC uint
1395 xfs_da_node_lasthash(
1396 struct xfs_buf *bp,
1397 int *count)
1398 {
1399 xfs_da_intnode_t *node;
1400
1401 node = bp->b_addr;
1402 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1403 if (count)
1404 *count = be16_to_cpu(node->hdr.count);
1405 if (!node->hdr.count)
1406 return(0);
1407 return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1408 }
1409
1410 /*
1411 * Unlink a block from a doubly linked list of blocks.
1412 */
1413 STATIC int /* error */
1414 xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1415 xfs_da_state_blk_t *save_blk)
1416 {
1417 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1418 xfs_da_args_t *args;
1419 struct xfs_buf *bp;
1420 int error;
1421
1422 /*
1423 * Set up environment.
1424 */
1425 args = state->args;
1426 ASSERT(args != NULL);
1427 save_info = save_blk->bp->b_addr;
1428 drop_info = drop_blk->bp->b_addr;
1429 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1430 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1431 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1432 ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
1433 ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
1434 ASSERT(save_blk->magic == drop_blk->magic);
1435 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1436 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1437 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1438 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1439
1440 /*
1441 * Unlink the leaf block from the doubly linked chain of leaves.
1442 */
1443 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1444 trace_xfs_da_unlink_back(args);
1445 save_info->back = drop_info->back;
1446 if (drop_info->back) {
1447 error = xfs_da_node_read(args->trans, args->dp,
1448 be32_to_cpu(drop_info->back),
1449 -1, &bp, args->whichfork);
1450 if (error)
1451 return(error);
1452 ASSERT(bp != NULL);
1453 tmp_info = bp->b_addr;
1454 ASSERT(tmp_info->magic == save_info->magic);
1455 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1456 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1457 xfs_trans_log_buf(args->trans, bp, 0,
1458 sizeof(*tmp_info) - 1);
1459 }
1460 } else {
1461 trace_xfs_da_unlink_forward(args);
1462 save_info->forw = drop_info->forw;
1463 if (drop_info->forw) {
1464 error = xfs_da_node_read(args->trans, args->dp,
1465 be32_to_cpu(drop_info->forw),
1466 -1, &bp, args->whichfork);
1467 if (error)
1468 return(error);
1469 ASSERT(bp != NULL);
1470 tmp_info = bp->b_addr;
1471 ASSERT(tmp_info->magic == save_info->magic);
1472 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1473 tmp_info->back = cpu_to_be32(save_blk->blkno);
1474 xfs_trans_log_buf(args->trans, bp, 0,
1475 sizeof(*tmp_info) - 1);
1476 }
1477 }
1478
1479 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1480 return(0);
1481 }
1482
1483 /*
1484 * Move a path "forward" or "!forward" one block at the current level.
1485 *
1486 * This routine will adjust a "path" to point to the next block
1487 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1488 * Btree, including updating pointers to the intermediate nodes between
1489 * the new bottom and the root.
1490 */
1491 int /* error */
1492 xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1493 int forward, int release, int *result)
1494 {
1495 xfs_da_state_blk_t *blk;
1496 xfs_da_blkinfo_t *info;
1497 xfs_da_intnode_t *node;
1498 xfs_da_args_t *args;
1499 xfs_dablk_t blkno=0;
1500 int level, error;
1501
1502 trace_xfs_da_path_shift(state->args);
1503
1504 /*
1505 * Roll up the Btree looking for the first block where our
1506 * current index is not at the edge of the block. Note that
1507 * we skip the bottom layer because we want the sibling block.
1508 */
1509 args = state->args;
1510 ASSERT(args != NULL);
1511 ASSERT(path != NULL);
1512 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1513 level = (path->active-1) - 1; /* skip bottom layer in path */
1514 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1515 ASSERT(blk->bp != NULL);
1516 node = blk->bp->b_addr;
1517 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1518 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1519 blk->index++;
1520 blkno = be32_to_cpu(node->btree[blk->index].before);
1521 break;
1522 } else if (!forward && (blk->index > 0)) {
1523 blk->index--;
1524 blkno = be32_to_cpu(node->btree[blk->index].before);
1525 break;
1526 }
1527 }
1528 if (level < 0) {
1529 *result = XFS_ERROR(ENOENT); /* we're out of our tree */
1530 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1531 return(0);
1532 }
1533
1534 /*
1535 * Roll down the edge of the subtree until we reach the
1536 * same depth we were at originally.
1537 */
1538 for (blk++, level++; level < path->active; blk++, level++) {
1539 /*
1540 * Release the old block.
1541 * (if it's dirty, trans won't actually let go)
1542 */
1543 if (release)
1544 xfs_trans_brelse(args->trans, blk->bp);
1545
1546 /*
1547 * Read the next child block.
1548 */
1549 blk->blkno = blkno;
1550 error = xfs_da_node_read(args->trans, args->dp, blkno, -1,
1551 &blk->bp, args->whichfork);
1552 if (error)
1553 return(error);
1554 ASSERT(blk->bp != NULL);
1555 info = blk->bp->b_addr;
1556 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1557 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1558 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1559 blk->magic = be16_to_cpu(info->magic);
1560 if (blk->magic == XFS_DA_NODE_MAGIC) {
1561 node = (xfs_da_intnode_t *)info;
1562 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1563 if (forward)
1564 blk->index = 0;
1565 else
1566 blk->index = be16_to_cpu(node->hdr.count)-1;
1567 blkno = be32_to_cpu(node->btree[blk->index].before);
1568 } else {
1569 ASSERT(level == path->active-1);
1570 blk->index = 0;
1571 switch(blk->magic) {
1572 case XFS_ATTR_LEAF_MAGIC:
1573 blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1574 NULL);
1575 break;
1576 case XFS_DIR2_LEAFN_MAGIC:
1577 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
1578 NULL);
1579 break;
1580 default:
1581 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
1582 blk->magic == XFS_DIR2_LEAFN_MAGIC);
1583 break;
1584 }
1585 }
1586 }
1587 *result = 0;
1588 return(0);
1589 }
1590
1591
1592 /*========================================================================
1593 * Utility routines.
1594 *========================================================================*/
1595
1596 /*
1597 * Implement a simple hash on a character string.
1598 * Rotate the hash value by 7 bits, then XOR each character in.
1599 * This is implemented with some source-level loop unrolling.
1600 */
1601 xfs_dahash_t
1602 xfs_da_hashname(const __uint8_t *name, int namelen)
1603 {
1604 xfs_dahash_t hash;
1605
1606 /*
1607 * Do four characters at a time as long as we can.
1608 */
1609 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1610 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1611 (name[3] << 0) ^ rol32(hash, 7 * 4);
1612
1613 /*
1614 * Now do the rest of the characters.
1615 */
1616 switch (namelen) {
1617 case 3:
1618 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1619 rol32(hash, 7 * 3);
1620 case 2:
1621 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1622 case 1:
1623 return (name[0] << 0) ^ rol32(hash, 7 * 1);
1624 default: /* case 0: */
1625 return hash;
1626 }
1627 }
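
/*
 * Worked example (values computed by hand from the routine above, not
 * part of the original source): hashing the 4-byte name "abcd" takes
 * the unrolled path exactly once:
 *
 *	hash = ('a' << 21) ^ ('b' << 14) ^ ('c' << 7) ^ 'd' ^ rol32(0, 28)
 *	     = 0x0c200000 ^ 0x00188000 ^ 0x00003180 ^ 0x00000064
 *	     = 0x0c38b1e4
 *
 * A shorter name falls straight through to the switch, e.g. "ab" hashes
 * to ('a' << 7) ^ 'b' = 0x30e2, while longer names keep folding four
 * bytes at a time into the rotated running hash.
 */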
1628
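/*
 * Default name comparison: a match requires an exact length and a
 * byte-for-byte identical name; anything else is XFS_CMP_DIFFERENT.
 */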
1629 enum xfs_dacmp
1630 xfs_da_compname(
1631 struct xfs_da_args *args,
1632 const unsigned char *name,
1633 int len)
1634 {
1635 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1636 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1637 }
1638
1639 static xfs_dahash_t
1640 xfs_default_hashname(
1641 struct xfs_name *name)
1642 {
1643 return xfs_da_hashname(name->name, name->len);
1644 }
1645
1646 const struct xfs_nameops xfs_default_nameops = {
1647 .hashname = xfs_default_hashname,
1648 .compname = xfs_da_compname
1649 };
1650
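/*
 * Allocate "count" new blocks in the given fork, starting at the first
 * unused file offset at or after *bno.  A single contiguous mapping is
 * tried first; if that fails, multi-block requests are retried
 * piecewise, and ENOSPC is returned unless the whole range ends up
 * mapped.
 */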
1651 int
1652 xfs_da_grow_inode_int(
1653 struct xfs_da_args *args,
1654 xfs_fileoff_t *bno,
1655 int count)
1656 {
1657 struct xfs_trans *tp = args->trans;
1658 struct xfs_inode *dp = args->dp;
1659 int w = args->whichfork;
1660 xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
1661 struct xfs_bmbt_irec map, *mapp;
1662 int nmap, error, got, i, mapi;
1663
1664 /*
1665 * Find a spot in the file space to put the new block.
1666 */
1667 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
1668 if (error)
1669 return error;
1670
1671 /*
1672 * Try mapping it in one filesystem block.
1673 */
1674 nmap = 1;
1675 ASSERT(args->firstblock != NULL);
1676 error = xfs_bmapi_write(tp, dp, *bno, count,
1677 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
1678 args->firstblock, args->total, &map, &nmap,
1679 args->flist);
1680 if (error)
1681 return error;
1682
1683 ASSERT(nmap <= 1);
1684 if (nmap == 1) {
1685 mapp = &map;
1686 mapi = 1;
1687 } else if (nmap == 0 && count > 1) {
1688 xfs_fileoff_t b;
1689 int c;
1690
1691 /*
1692 * If we didn't get it and the block might work if fragmented,
1693 * try without the CONTIG flag. Loop until we get it all.
1694 */
1695 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
1696 for (b = *bno, mapi = 0; b < *bno + count; ) {
1697 nmap = MIN(XFS_BMAP_MAX_NMAP, count);
1698 c = (int)(*bno + count - b);
1699 error = xfs_bmapi_write(tp, dp, b, c,
1700 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1701 args->firstblock, args->total,
1702 &mapp[mapi], &nmap, args->flist);
1703 if (error)
1704 goto out_free_map;
1705 if (nmap < 1)
1706 break;
1707 mapi += nmap;
1708 b = mapp[mapi - 1].br_startoff +
1709 mapp[mapi - 1].br_blockcount;
1710 }
1711 } else {
1712 mapi = 0;
1713 mapp = NULL;
1714 }
1715
1716 /*
1717 * Count the blocks we got, make sure it matches the total.
1718 */
1719 for (i = 0, got = 0; i < mapi; i++)
1720 got += mapp[i].br_blockcount;
1721 if (got != count || mapp[0].br_startoff != *bno ||
1722 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
1723 *bno + count) {
1724 error = XFS_ERROR(ENOSPC);
1725 goto out_free_map;
1726 }
1727
1728 /* account for newly allocated blocks in reserved blocks total */
1729 args->total -= dp->i_d.di_nblocks - nblks;
1730
1731 out_free_map:
1732 if (mapp != &map)
1733 kmem_free(mapp);
1734 return error;
1735 }
1736
1737 /*
1738 * Add a block to the btree ahead of the file.
1739 * Return the new block number to the caller.
1740 */
1741 int
1742 xfs_da_grow_inode(
1743 struct xfs_da_args *args,
1744 xfs_dablk_t *new_blkno)
1745 {
1746 xfs_fileoff_t bno;
1747 int count;
1748 int error;
1749
1750 trace_xfs_da_grow_inode(args);
1751
1752 if (args->whichfork == XFS_DATA_FORK) {
1753 bno = args->dp->i_mount->m_dirleafblk;
1754 count = args->dp->i_mount->m_dirblkfsbs;
1755 } else {
1756 bno = 0;
1757 count = 1;
1758 }
1759
1760 error = xfs_da_grow_inode_int(args, &bno, count);
1761 if (!error)
1762 *new_blkno = (xfs_dablk_t)bno;
1763 return error;
1764 }
1765
1766 /*
1767 * Ick. We need to always be able to remove a btree block, even
1768 * if there's no space reservation because the filesystem is full.
1769 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
1770 * It swaps the target block with the last block in the file. The
1771 * last block in the file can always be removed since it can't cause
1772 * a bmap btree split to do that.
1773 */
1774 STATIC int
1775 xfs_da_swap_lastblock(
1776 xfs_da_args_t *args,
1777 xfs_dablk_t *dead_blknop,
1778 struct xfs_buf **dead_bufp)
1779 {
1780 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1781 struct xfs_buf *dead_buf, *last_buf, *sib_buf, *par_buf;
1782 xfs_fileoff_t lastoff;
1783 xfs_inode_t *ip;
1784 xfs_trans_t *tp;
1785 xfs_mount_t *mp;
1786 int error, w, entno, level, dead_level;
1787 xfs_da_blkinfo_t *dead_info, *sib_info;
1788 xfs_da_intnode_t *par_node, *dead_node;
1789 xfs_dir2_leaf_t *dead_leaf2;
1790 xfs_dahash_t dead_hash;
1791
1792 trace_xfs_da_swap_lastblock(args);
1793
1794 dead_buf = *dead_bufp;
1795 dead_blkno = *dead_blknop;
1796 tp = args->trans;
1797 ip = args->dp;
1798 w = args->whichfork;
1799 ASSERT(w == XFS_DATA_FORK);
1800 mp = ip->i_mount;
1801 lastoff = mp->m_dirfreeblk;
1802 error = xfs_bmap_last_before(tp, ip, &lastoff, w);
1803 if (error)
1804 return error;
1805 if (unlikely(lastoff == 0)) {
1806 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
1807 mp);
1808 return XFS_ERROR(EFSCORRUPTED);
1809 }
1810 /*
1811 * Read the last block in the btree space.
1812 */
1813 last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
1814 error = xfs_da_node_read(tp, ip, last_blkno, -1, &last_buf, w);
1815 if (error)
1816 return error;
1817 /*
1818 * Copy the last block into the dead buffer and log it.
1819 */
1820 memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
1821 xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1822 dead_info = dead_buf->b_addr;
1823 /*
1824 * Get values from the moved block.
1825 */
1826 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
1827 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1828 dead_level = 0;
1829 dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1830 } else {
1831 ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1832 dead_node = (xfs_da_intnode_t *)dead_info;
1833 dead_level = be16_to_cpu(dead_node->hdr.level);
1834 dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1835 }
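	/*
	 * dead_level and dead_hash now describe the block we just copied;
	 * they are used below to locate its entry in the parent node.
	 */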
1836 sib_buf = par_buf = NULL;
1837 /*
1838 * If the moved block has a left sibling, fix up the pointers.
1839 */
1840 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1841 error = xfs_da_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
1842 if (error)
1843 goto done;
1844 sib_info = sib_buf->b_addr;
1845 if (unlikely(
1846 be32_to_cpu(sib_info->forw) != last_blkno ||
1847 sib_info->magic != dead_info->magic)) {
1848 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1849 XFS_ERRLEVEL_LOW, mp);
1850 error = XFS_ERROR(EFSCORRUPTED);
1851 goto done;
1852 }
1853 sib_info->forw = cpu_to_be32(dead_blkno);
1854 xfs_trans_log_buf(tp, sib_buf,
1855 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1856 sizeof(sib_info->forw)));
1857 sib_buf = NULL;
1858 }
1859 /*
1860 * If the moved block has a right sibling, fix up the pointers.
1861 */
1862 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1863 error = xfs_da_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
1864 if (error)
1865 goto done;
1866 sib_info = sib_buf->b_addr;
1867 if (unlikely(
1868 be32_to_cpu(sib_info->back) != last_blkno ||
1869 sib_info->magic != dead_info->magic)) {
1870 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1871 XFS_ERRLEVEL_LOW, mp);
1872 error = XFS_ERROR(EFSCORRUPTED);
1873 goto done;
1874 }
1875 sib_info->back = cpu_to_be32(dead_blkno);
1876 xfs_trans_log_buf(tp, sib_buf,
1877 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1878 sizeof(sib_info->back)));
1879 sib_buf = NULL;
1880 }
1881 par_blkno = mp->m_dirleafblk;
1882 level = -1;
1883 /*
1884 * Walk down the tree looking for the parent of the moved block.
1885 */
1886 for (;;) {
1887 error = xfs_da_node_read(tp, ip, par_blkno, -1, &par_buf, w);
1888 if (error)
1889 goto done;
1890 par_node = par_buf->b_addr;
1891 if (unlikely(par_node->hdr.info.magic !=
1892 cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1893 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1894 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1895 XFS_ERRLEVEL_LOW, mp);
1896 error = XFS_ERROR(EFSCORRUPTED);
1897 goto done;
1898 }
1899 level = be16_to_cpu(par_node->hdr.level);
1900 for (entno = 0;
1901 entno < be16_to_cpu(par_node->hdr.count) &&
1902 be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1903 entno++)
1904 continue;
1905 if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1906 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1907 XFS_ERRLEVEL_LOW, mp);
1908 error = XFS_ERROR(EFSCORRUPTED);
1909 goto done;
1910 }
1911 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1912 if (level == dead_level + 1)
1913 break;
1914 xfs_trans_brelse(tp, par_buf);
1915 par_buf = NULL;
1916 }
1917 /*
1918 * We're in the right parent block.
1919 * Look for the right entry.
1920 */
1921 for (;;) {
1922 for (;
1923 entno < be16_to_cpu(par_node->hdr.count) &&
1924 be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1925 entno++)
1926 continue;
1927 if (entno < be16_to_cpu(par_node->hdr.count))
1928 break;
1929 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1930 xfs_trans_brelse(tp, par_buf);
1931 par_buf = NULL;
1932 if (unlikely(par_blkno == 0)) {
1933 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
1934 XFS_ERRLEVEL_LOW, mp);
1935 error = XFS_ERROR(EFSCORRUPTED);
1936 goto done;
1937 }
1938 error = xfs_da_node_read(tp, ip, par_blkno, -1, &par_buf, w);
1939 if (error)
1940 goto done;
1941 par_node = par_buf->b_addr;
1942 if (unlikely(
1943 be16_to_cpu(par_node->hdr.level) != level ||
1944 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
1945 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1946 XFS_ERRLEVEL_LOW, mp);
1947 error = XFS_ERROR(EFSCORRUPTED);
1948 goto done;
1949 }
1950 entno = 0;
1951 }
1952 /*
1953 * Update the parent entry pointing to the moved block.
1954 */
1955 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1956 xfs_trans_log_buf(tp, par_buf,
1957 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1958 sizeof(par_node->btree[entno].before)));
1959 *dead_blknop = last_blkno;
1960 *dead_bufp = last_buf;
1961 return 0;
1962 done:
1963 if (par_buf)
1964 xfs_trans_brelse(tp, par_buf);
1965 if (sib_buf)
1966 xfs_trans_brelse(tp, sib_buf);
1967 xfs_trans_brelse(tp, last_buf);
1968 return error;
1969 }
1970
1971 /*
1972 * Remove a btree block from a directory or attribute.
1973 */
1974 int
1975 xfs_da_shrink_inode(
1976 xfs_da_args_t *args,
1977 xfs_dablk_t dead_blkno,
1978 struct xfs_buf *dead_buf)
1979 {
1980 xfs_inode_t *dp;
1981 int done, error, w, count;
1982 xfs_trans_t *tp;
1983 xfs_mount_t *mp;
1984
1985 trace_xfs_da_shrink_inode(args);
1986
1987 dp = args->dp;
1988 w = args->whichfork;
1989 tp = args->trans;
1990 mp = dp->i_mount;
1991 if (w == XFS_DATA_FORK)
1992 count = mp->m_dirblkfsbs;
1993 else
1994 count = 1;
1995 for (;;) {
1996 /*
1997 		 * Remove extents.  If we get ENOSPC for a dir, we have to
1998 		 * move the last block to the place we want to kill.
1999 */
2000 if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
2001 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2002 0, args->firstblock, args->flist,
2003 &done)) == ENOSPC) {
2004 if (w != XFS_DATA_FORK)
2005 break;
2006 if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
2007 &dead_buf)))
2008 break;
2009 } else {
2010 break;
2011 }
2012 }
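	/*
	 * Invalidate the buffer for the removed block (after a swap this is
	 * the old last block) so its stale contents are never written back.
	 */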
2013 xfs_trans_binval(tp, dead_buf);
2014 return error;
2015 }
2016
2017 /*
2018 * See if the mapping(s) for this btree block are valid, i.e.
2019 * don't contain holes, are logically contiguous, and cover the whole range.
2020 */
2021 STATIC int
2022 xfs_da_map_covers_blocks(
2023 int nmap,
2024 xfs_bmbt_irec_t *mapp,
2025 xfs_dablk_t bno,
2026 int count)
2027 {
2028 int i;
2029 xfs_fileoff_t off;
2030
2031 for (i = 0, off = bno; i < nmap; i++) {
2032 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2033 mapp[i].br_startblock == DELAYSTARTBLOCK) {
2034 return 0;
2035 }
2036 if (off != mapp[i].br_startoff) {
2037 return 0;
2038 }
2039 off += mapp[i].br_blockcount;
2040 }
2041 return off == bno + count;
2042 }
2043
2044 /*
2045  * Convert an array of struct xfs_bmbt_irec to an array of struct xfs_buf_map.
2046 *
2047 * For the single map case, it is assumed that the caller has provided a pointer
2048 * to a valid xfs_buf_map. For the multiple map case, this function will
2049 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2050 * map pointer with the allocated map.
2051 */
2052 static int
2053 xfs_buf_map_from_irec(
2054 struct xfs_mount *mp,
2055 struct xfs_buf_map **mapp,
2056 unsigned int *nmaps,
2057 struct xfs_bmbt_irec *irecs,
2058 unsigned int nirecs)
2059 {
2060 struct xfs_buf_map *map;
2061 int i;
2062
2063 ASSERT(*nmaps == 1);
2064 ASSERT(nirecs >= 1);
2065
2066 if (nirecs > 1) {
2067 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
2068 if (!map)
2069 return ENOMEM;
2070 *mapp = map;
2071 }
2072
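	/*
	 * Translate each extent into a disk address/length pair; *mapp is
	 * either the caller's single map or the array allocated above.
	 */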
2073 *nmaps = nirecs;
2074 map = *mapp;
2075 for (i = 0; i < *nmaps; i++) {
2076 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2077 irecs[i].br_startblock != HOLESTARTBLOCK);
2078 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2079 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2080 }
2081 return 0;
2082 }
2083
2084 /*
2085  * Map the block we are given, ready for reading. There are three possible
2086  * return values:
2087  *	-1 - we landed in a hole and mappedbno == -2, so the caller knows
2088  *	     not to execute a subsequent read.
2089 * 0 - if we mapped the block successfully
2090 * >0 - positive error number if there was an error.
2091 */
2092 static int
2093 xfs_dabuf_map(
2094 struct xfs_trans *trans,
2095 struct xfs_inode *dp,
2096 xfs_dablk_t bno,
2097 xfs_daddr_t mappedbno,
2098 int whichfork,
2099 struct xfs_buf_map **map,
2100 int *nmaps)
2101 {
2102 struct xfs_mount *mp = dp->i_mount;
2103 int nfsb;
2104 int error = 0;
2105 struct xfs_bmbt_irec irec;
2106 struct xfs_bmbt_irec *irecs = &irec;
2107 int nirecs;
2108
2109 ASSERT(map && *map);
2110 ASSERT(*nmaps == 1);
2111
2112 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
2113
2114 /*
2115 * Caller doesn't have a mapping. -2 means don't complain
2116 * if we land in a hole.
2117 */
2118 if (mappedbno == -1 || mappedbno == -2) {
2119 /*
2120 * Optimize the one-block case.
2121 */
2122 if (nfsb != 1)
2123 irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
2124
2125 nirecs = nfsb;
2126 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2127 &nirecs, xfs_bmapi_aflag(whichfork));
2128 if (error)
2129 goto out;
2130 } else {
2131 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2132 irecs->br_startoff = (xfs_fileoff_t)bno;
2133 irecs->br_blockcount = nfsb;
2134 irecs->br_state = 0;
2135 nirecs = 1;
2136 }
2137
2138 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2139 error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
2140 if (unlikely(error == EFSCORRUPTED)) {
2141 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2142 int i;
2143 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2144 __func__, (long long)bno,
2145 (long long)dp->i_ino);
2146 for (i = 0; i < *nmaps; i++) {
2147 xfs_alert(mp,
2148 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2149 i,
2150 (long long)irecs[i].br_startoff,
2151 (long long)irecs[i].br_startblock,
2152 (long long)irecs[i].br_blockcount,
2153 irecs[i].br_state);
2154 }
2155 }
2156 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2157 XFS_ERRLEVEL_LOW, mp);
2158 }
2159 goto out;
2160 }
2161 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2162 out:
2163 if (irecs != &irec)
2164 kmem_free(irecs);
2165 return error;
2166 }
2167
2168 /*
2169 * Get a buffer for the dir/attr block.
2170 */
2171 int
2172 xfs_da_get_buf(
2173 struct xfs_trans *trans,
2174 struct xfs_inode *dp,
2175 xfs_dablk_t bno,
2176 xfs_daddr_t mappedbno,
2177 struct xfs_buf **bpp,
2178 int whichfork)
2179 {
2180 struct xfs_buf *bp;
2181 struct xfs_buf_map map;
2182 struct xfs_buf_map *mapp;
2183 int nmap;
2184 int error;
2185
2186 *bpp = NULL;
2187 mapp = &map;
2188 nmap = 1;
2189 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2190 &mapp, &nmap);
2191 if (error) {
2192 /* mapping a hole is not an error, but we don't continue */
2193 if (error == -1)
2194 error = 0;
2195 goto out_free;
2196 }
2197
2198 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2199 mapp, nmap, 0);
2200 error = bp ? bp->b_error : XFS_ERROR(EIO);
2201 if (error) {
2202 xfs_trans_brelse(trans, bp);
2203 goto out_free;
2204 }
2205
2206 *bpp = bp;
2207
2208 out_free:
2209 if (mapp != &map)
2210 kmem_free(mapp);
2211
2212 return error;
2213 }
2214
2215 /*
2216 * Get a buffer for the dir/attr block, fill in the contents.
2217 */
2218 int
2219 xfs_da_read_buf(
2220 struct xfs_trans *trans,
2221 struct xfs_inode *dp,
2222 xfs_dablk_t bno,
2223 xfs_daddr_t mappedbno,
2224 struct xfs_buf **bpp,
2225 int whichfork,
2226 xfs_buf_iodone_t verifier)
2227 {
2228 struct xfs_buf *bp;
2229 struct xfs_buf_map map;
2230 struct xfs_buf_map *mapp;
2231 int nmap;
2232 int error;
2233
2234 *bpp = NULL;
2235 mapp = &map;
2236 nmap = 1;
2237 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2238 &mapp, &nmap);
2239 if (error) {
2240 /* mapping a hole is not an error, but we don't continue */
2241 if (error == -1)
2242 error = 0;
2243 goto out_free;
2244 }
2245
2246 error = xfs_trans_read_buf_map(dp->i_mount, trans,
2247 dp->i_mount->m_ddev_targp,
2248 mapp, nmap, 0, &bp, verifier);
2249 if (error)
2250 goto out_free;
2251
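	/*
	 * Set the buffer's LRU reference count, which controls how long the
	 * block stays in the buffer cache once the buffer is released.
	 */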
2252 if (whichfork == XFS_ATTR_FORK)
2253 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2254 else
2255 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2256
2257 /*
2258 * This verification code will be moved to a CRC verification callback
2259 	 * function, so just leave it here unchanged until then.
2260 */
2261 {
2262 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
2263 xfs_dir2_free_t *free = bp->b_addr;
2264 xfs_da_blkinfo_t *info = bp->b_addr;
2265 uint magic, magic1;
2266 struct xfs_mount *mp = dp->i_mount;
2267
2268 magic = be16_to_cpu(info->magic);
2269 magic1 = be32_to_cpu(hdr->magic);
2270 if (unlikely(
2271 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2272 (magic != XFS_ATTR_LEAF_MAGIC) &&
2273 (magic != XFS_DIR2_LEAF1_MAGIC) &&
2274 (magic != XFS_DIR2_LEAFN_MAGIC) &&
2275 (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2276 (magic1 != XFS_DIR2_DATA_MAGIC) &&
2277 (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
2278 mp, XFS_ERRTAG_DA_READ_BUF,
2279 XFS_RANDOM_DA_READ_BUF))) {
2280 trace_xfs_da_btree_corrupt(bp, _RET_IP_);
2281 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2282 XFS_ERRLEVEL_LOW, mp, info);
2283 error = XFS_ERROR(EFSCORRUPTED);
2284 xfs_trans_brelse(trans, bp);
2285 goto out_free;
2286 }
2287 }
2288 *bpp = bp;
2289 out_free:
2290 if (mapp != &map)
2291 kmem_free(mapp);
2292
2293 return error;
2294 }
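
/*
 * Illustrative caller pattern (a sketch only, not code from this file,
 * assuming an xfs_da_args "args" and a known dablk "blkno"):
 *
 *	struct xfs_buf *bp;
 *
 *	error = xfs_da_read_buf(args->trans, args->dp, blkno, -1, &bp,
 *				args->whichfork, NULL);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr, then xfs_trans_brelse() or log the buffer ...
 *
 * A mappedbno of -1 asks for the disk address to be looked up; -2 does the
 * same but also allows the block to be a hole, in which case *bpp comes
 * back NULL with no error.
 */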
2295
2296 /*
2297 * Readahead the dir/attr block.
2298 */
2299 xfs_daddr_t
2300 xfs_da_reada_buf(
2301 struct xfs_trans *trans,
2302 struct xfs_inode *dp,
2303 xfs_dablk_t bno,
2304 xfs_daddr_t mappedbno,
2305 int whichfork,
2306 xfs_buf_iodone_t verifier)
2307 {
2308 struct xfs_buf_map map;
2309 struct xfs_buf_map *mapp;
2310 int nmap;
2311 int error;
2312
2313 mapp = &map;
2314 nmap = 1;
2315 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2316 &mapp, &nmap);
2317 if (error) {
2318 /* mapping a hole is not an error, but we don't continue */
2319 if (error == -1)
2320 error = 0;
2321 goto out_free;
2322 }
2323
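	/*
	 * Remember the daddr of the first mapping for the return value and
	 * issue the readahead.  Readahead is advisory only, so I/O errors
	 * are simply ignored.
	 */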
2324 mappedbno = mapp[0].bm_bn;
2325 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, NULL);
2326
2327 out_free:
2328 if (mapp != &map)
2329 kmem_free(mapp);
2330
2331 if (error)
2332 return -1;
2333 return mappedbno;
2334 }
2335
2336 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
2337
2338 /*
2339 * Allocate a dir-state structure.
2340 * We don't put them on the stack since they're large.
2341 */
2342 xfs_da_state_t *
2343 xfs_da_state_alloc(void)
2344 {
2345 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
2346 }
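
/*
 * Typical lifetime of a da-state structure (illustrative sketch only,
 * assuming an xfs_da_args "args" set up by the caller):
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	... perform the lookup/split/join using state ...
 *	xfs_da_state_free(state);
 */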
2347
2348 /*
2349 * Kill the altpath contents of a da-state structure.
2350 */
2351 STATIC void
2352 xfs_da_state_kill_altpath(xfs_da_state_t *state)
2353 {
2354 int i;
2355
2356 for (i = 0; i < state->altpath.active; i++)
2357 state->altpath.blk[i].bp = NULL;
2358 state->altpath.active = 0;
2359 }
2360
2361 /*
2362 * Free a da-state structure.
2363 */
2364 void
2365 xfs_da_state_free(xfs_da_state_t *state)
2366 {
2367 xfs_da_state_kill_altpath(state);
2368 #ifdef DEBUG
2369 memset((char *)state, 0, sizeof(*state));
2370 #endif /* DEBUG */
2371 kmem_zone_free(xfs_da_state_zone, state);
2372 }