xfs: create a shared header file for format-related information
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_shared.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"

/*
 * xfs_da_btree.c
 *
 * Routines to implement directories as Btrees of hashed names.
 */

/*========================================================================
 * Function prototypes for the kernel.
 *========================================================================*/

/*
 * Routines used for growing the Btree.
 */
STATIC int	xfs_da3_root_split(xfs_da_state_t *state,
				   xfs_da_state_blk_t *existing_root,
				   xfs_da_state_blk_t *new_child);
STATIC int	xfs_da3_node_split(xfs_da_state_t *state,
				   xfs_da_state_blk_t *existing_blk,
				   xfs_da_state_blk_t *split_blk,
				   xfs_da_state_blk_t *blk_to_add,
				   int treelevel,
				   int *result);
STATIC void	xfs_da3_node_rebalance(xfs_da_state_t *state,
				       xfs_da_state_blk_t *node_blk_1,
				       xfs_da_state_blk_t *node_blk_2);
STATIC void	xfs_da3_node_add(xfs_da_state_t *state,
				 xfs_da_state_blk_t *old_node_blk,
				 xfs_da_state_blk_t *new_node_blk);

/*
 * Routines used for shrinking the Btree.
 */
STATIC int	xfs_da3_root_join(xfs_da_state_t *state,
				  xfs_da_state_blk_t *root_blk);
STATIC int	xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void	xfs_da3_node_remove(xfs_da_state_t *state,
				    xfs_da_state_blk_t *drop_blk);
STATIC void	xfs_da3_node_unbalance(xfs_da_state_t *state,
				       xfs_da_state_blk_t *src_node_blk,
				       xfs_da_state_blk_t *dst_node_blk);

/*
 * Utility routines.
 */
STATIC int	xfs_da3_blk_unlink(xfs_da_state_t *state,
				   xfs_da_state_blk_t *drop_blk,
				   xfs_da_state_blk_t *save_blk);


kmem_zone_t *xfs_da_state_zone;	/* anchor for state struct zone */

/*
 * Allocate a dir-state structure.
 * We don't put them on the stack since they're large.
 */
xfs_da_state_t *
xfs_da_state_alloc(void)
{
	return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}

/*
 * Kill the altpath contents of a da-state structure.
 */
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
	int	i;

	for (i = 0; i < state->altpath.active; i++)
		state->altpath.blk[i].bp = NULL;
	state->altpath.active = 0;
}

/*
 * Free a da-state structure.
 */
void
xfs_da_state_free(xfs_da_state_t *state)
{
	xfs_da_state_kill_altpath(state);
#ifdef DEBUG
	memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
	kmem_zone_free(xfs_da_state_zone, state);
}

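/*
 * The on-disk node header comes in two flavours: the original
 * xfs_da_node_hdr and the larger xfs_da3_node_hdr used on CRC-enabled (v5)
 * filesystems. The conversion helpers below fold either variant into the
 * in-core xfs_da3_icnode_hdr so the rest of this file can stay format
 * independent.
 */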
void
xfs_da3_node_hdr_from_disk(
	struct xfs_da3_icnode_hdr	*to,
	struct xfs_da_intnode		*from)
{
	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	       from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));

	if (from->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)from;

		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
		to->back = be32_to_cpu(hdr3->info.hdr.back);
		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
		to->count = be16_to_cpu(hdr3->__count);
		to->level = be16_to_cpu(hdr3->__level);
		return;
	}
	to->forw = be32_to_cpu(from->hdr.info.forw);
	to->back = be32_to_cpu(from->hdr.info.back);
	to->magic = be16_to_cpu(from->hdr.info.magic);
	to->count = be16_to_cpu(from->hdr.__count);
	to->level = be16_to_cpu(from->hdr.__level);
}

void
xfs_da3_node_hdr_to_disk(
	struct xfs_da_intnode		*to,
	struct xfs_da3_icnode_hdr	*from)
{
	ASSERT(from->magic == XFS_DA_NODE_MAGIC ||
	       from->magic == XFS_DA3_NODE_MAGIC);

	if (from->magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_node_hdr *hdr3 = (struct xfs_da3_node_hdr *)to;

		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
		hdr3->info.hdr.back = cpu_to_be32(from->back);
		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
		hdr3->__count = cpu_to_be16(from->count);
		hdr3->__level = cpu_to_be16(from->level);
		return;
	}
	to->hdr.info.forw = cpu_to_be32(from->forw);
	to->hdr.info.back = cpu_to_be32(from->back);
	to->hdr.info.magic = cpu_to_be16(from->magic);
	to->hdr.__count = cpu_to_be16(from->count);
	to->hdr.__level = cpu_to_be16(from->level);
}

static bool
xfs_da3_node_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_da_intnode	*hdr = bp->b_addr;
	struct xfs_da3_icnode_hdr ichdr;

	xfs_da3_node_hdr_from_disk(&ichdr, hdr);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		if (ichdr.magic != XFS_DA3_NODE_MAGIC)
			return false;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
			return false;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return false;
	} else {
		if (ichdr.magic != XFS_DA_NODE_MAGIC)
			return false;
	}
	if (ichdr.level == 0)
		return false;
	if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
		return false;
	if (ichdr.count == 0)
		return false;

	/*
	 * we don't know if the node is for an attribute or directory tree,
	 * so only fail if the count is outside both bounds
	 */
	if (ichdr.count > mp->m_dir_node_ents &&
	    ichdr.count > mp->m_attr_node_ents)
		return false;

	/* XXX: hash order check? */

	return true;
}

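/*
 * Write verifier: check the node before it goes to disk and, on CRC-enabled
 * filesystems, stamp the LSN of the last modification and recompute the CRC.
 */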
static void
xfs_da3_node_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_fspriv;
	struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

	if (!xfs_da3_node_verify(bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		return;
	}

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

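	/* compute the CRC last so that it covers the LSN we just stamped */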
248 }
249
250 /*
251 * leaf/node format detection on trees is sketchy, so a node read can be done on
252 * leaf level blocks when detection identifies the tree as a node format tree
253 * incorrectly. In this case, we need to swap the verifier to match the correct
254 * format of the block being read.
255 */
256 static void
257 xfs_da3_node_read_verify(
258 struct xfs_buf *bp)
259 {
260 struct xfs_mount *mp = bp->b_target->bt_mount;
261 struct xfs_da_blkinfo *info = bp->b_addr;
262
263 switch (be16_to_cpu(info->magic)) {
264 case XFS_DA3_NODE_MAGIC:
265 if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
266 XFS_DA3_NODE_CRC_OFF))
267 break;
268 /* fall through */
269 case XFS_DA_NODE_MAGIC:
270 if (!xfs_da3_node_verify(bp))
271 break;
272 return;
273 case XFS_ATTR_LEAF_MAGIC:
274 case XFS_ATTR3_LEAF_MAGIC:
275 bp->b_ops = &xfs_attr3_leaf_buf_ops;
276 bp->b_ops->verify_read(bp);
277 return;
278 case XFS_DIR2_LEAFN_MAGIC:
279 case XFS_DIR3_LEAFN_MAGIC:
280 bp->b_ops = &xfs_dir3_leafn_buf_ops;
281 bp->b_ops->verify_read(bp);
282 return;
283 default:
284 break;
285 }
286
287 /* corrupt block */
288 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
289 xfs_buf_ioerror(bp, EFSCORRUPTED);
290 }
291
292 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
293 .verify_read = xfs_da3_node_read_verify,
294 .verify_write = xfs_da3_node_write_verify,
295 };
296
297 int
298 xfs_da3_node_read(
299 struct xfs_trans *tp,
300 struct xfs_inode *dp,
301 xfs_dablk_t bno,
302 xfs_daddr_t mappedbno,
303 struct xfs_buf **bpp,
304 int which_fork)
305 {
306 int err;
307
308 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
309 which_fork, &xfs_da3_node_buf_ops);
310 if (!err && tp) {
311 struct xfs_da_blkinfo *info = (*bpp)->b_addr;
312 int type;
313
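		/*
		 * This buffer may be a da node or a dir/attr leaf; record
		 * the correct buffer type on the transaction so that log
		 * recovery knows how to replay it.
		 */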
		switch (be16_to_cpu(info->magic)) {
		case XFS_DA_NODE_MAGIC:
		case XFS_DA3_NODE_MAGIC:
			type = XFS_BLFT_DA_NODE_BUF;
			break;
		case XFS_ATTR_LEAF_MAGIC:
		case XFS_ATTR3_LEAF_MAGIC:
			type = XFS_BLFT_ATTR_LEAF_BUF;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
		case XFS_DIR3_LEAFN_MAGIC:
			type = XFS_BLFT_DIR_LEAFN_BUF;
			break;
		default:
			type = 0;
			ASSERT(0);
			break;
		}
		xfs_trans_buf_set_type(tp, *bpp, type);
	}
	return err;
}

/*========================================================================
 * Routines used for growing the Btree.
 *========================================================================*/

/*
 * Create the initial contents of an intermediate node.
 */
int
xfs_da3_node_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	int			level,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_da_intnode	*node;
	struct xfs_trans	*tp = args->trans;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_da3_icnode_hdr ichdr = {0};
	struct xfs_buf		*bp;
	int			error;

	trace_xfs_da_node_create(args);
	ASSERT(level <= XFS_DA_NODE_MAXDEPTH);

	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
	if (error)
		return(error);
	bp->b_ops = &xfs_da3_node_buf_ops;
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	node = bp->b_addr;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

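		/*
		 * v5 headers are self-describing: record the block number,
		 * owning inode and filesystem UUID so the verifiers can
		 * catch misplaced writes.
		 */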
		ichdr.magic = XFS_DA3_NODE_MAGIC;
		hdr3->info.blkno = cpu_to_be64(bp->b_bn);
		hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
		uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
	} else {
		ichdr.magic = XFS_DA_NODE_MAGIC;
	}
	ichdr.level = level;

	xfs_da3_node_hdr_to_disk(node, &ichdr);
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));

	*bpp = bp;
	return(0);
}

/*
 * Split a leaf node, rebalance, then possibly split
 * intermediate nodes, rebalance, etc.
 */
int							/* error */
xfs_da3_split(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*oldblk;
	struct xfs_da_state_blk	*newblk;
	struct xfs_da_state_blk	*addblk;
	struct xfs_da_intnode	*node;
	struct xfs_buf		*bp;
	int			max;
	int			action = 0;
	int			error;
	int			i;

	trace_xfs_da_split(state->args);

	/*
	 * Walk back up the tree splitting/inserting/adjusting as necessary.
	 * If we need to insert and there isn't room, split the node, then
	 * decide which fragment to insert the new block from below into.
	 * Note that we may split the root this way, but we need more fixup.
	 */
	max = state->path.active - 1;
	ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
	ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
	       state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);

	addblk = &state->path.blk[max];		/* initial dummy value */
	for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
		oldblk = &state->path.blk[i];
		newblk = &state->altpath.blk[i];

		/*
		 * If a leaf node then
		 *     Allocate a new leaf node, then rebalance across them.
		 * else if an intermediate node then
		 *     We split on the last layer, must we split the node?
		 */
		switch (oldblk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_split(state, oldblk, newblk);
			if ((error != 0) && (error != ENOSPC)) {
				return(error);	/* GROT: attr is inconsistent */
			}
			if (!error) {
				addblk = newblk;
				break;
			}
			/*
			 * Entry wouldn't fit, split the leaf again.
			 */
			state->extravalid = 1;
			if (state->inleaf) {
				state->extraafter = 0;	/* before newblk */
				trace_xfs_attr_leaf_split_before(state->args);
				error = xfs_attr3_leaf_split(state, oldblk,
							     &state->extrablk);
			} else {
				state->extraafter = 1;	/* after newblk */
				trace_xfs_attr_leaf_split_after(state->args);
				error = xfs_attr3_leaf_split(state, newblk,
							     &state->extrablk);
			}
			if (error)
				return(error);	/* GROT: attr inconsistent */
			addblk = newblk;
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_split(state, oldblk, newblk);
			if (error)
				return error;
			addblk = newblk;
			break;
		case XFS_DA_NODE_MAGIC:
			error = xfs_da3_node_split(state, oldblk, newblk, addblk,
							 max - i, &action);
			addblk->bp = NULL;
			if (error)
				return(error);	/* GROT: dir is inconsistent */
			/*
			 * Record the newly split block for the next time thru?
			 */
			if (action)
				addblk = newblk;
			else
				addblk = NULL;
			break;
		}

		/*
		 * Update the btree to show the new hashval for this child.
		 */
		xfs_da3_fixhashpath(state, &state->path);
	}
	if (!addblk)
		return(0);

	/*
	 * Split the root node.
	 */
	ASSERT(state->path.active == 0);
	oldblk = &state->path.blk[0];
	error = xfs_da3_root_split(state, oldblk, addblk);
	if (error) {
		addblk->bp = NULL;
		return(error);	/* GROT: dir is inconsistent */
	}

	/*
	 * Update pointers to the node which used to be block 0 and
	 * just got bumped because of the addition of a new root node.
	 * There might be three blocks involved if a double split occurred,
	 * and the original block 0 could be at any position in the list.
	 *
	 * Note: the magic numbers and sibling pointers are in the same
	 * physical place for both v2 and v3 headers (by design). Hence it
	 * doesn't matter which version of the xfs_da_intnode structure we use
	 * here as the result will be the same using either structure.
	 */
	node = oldblk->bp->b_addr;
	if (node->hdr.info.forw) {
		if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.back = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	node = oldblk->bp->b_addr;
	if (node->hdr.info.back) {
		if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
			bp = addblk->bp;
		} else {
			ASSERT(state->extravalid);
			bp = state->extrablk.bp;
		}
		node = bp->b_addr;
		node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
		xfs_trans_log_buf(state->args->trans, bp,
			XFS_DA_LOGRANGE(node, &node->hdr.info,
					sizeof(node->hdr.info)));
	}
	addblk->bp = NULL;
	return(0);
}

/*
 * Split the root.  We have to create a new root and point to the two
 * parts (the split old root) that we just created.  Copy block zero to
 * the EOF, extending the inode in the process.
 */
STATIC int						/* error */
xfs_da3_root_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_args	*args;
	struct xfs_buf		*bp;
	struct xfs_inode	*dp;
	struct xfs_trans	*tp;
	struct xfs_mount	*mp;
	struct xfs_dir2_leaf	*leaf;
	xfs_dablk_t		blkno;
	int			level;
	int			error;
	int			size;

	trace_xfs_da_root_split(state->args);

	/*
	 * Copy the existing (incorrect) block from the root node position
	 * to a free space somewhere.
	 */
	args = state->args;
	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		return error;

	dp = args->dp;
	tp = args->trans;
	mp = state->mp;
	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
	if (error)
		return error;
	node = bp->b_addr;
	oldroot = blk1->bp->b_addr;
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
		struct xfs_da3_icnode_hdr nodehdr;

		xfs_da3_node_hdr_from_disk(&nodehdr, oldroot);
		btree = xfs_da3_node_tree_p(oldroot);
		size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
		level = nodehdr.level;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
	} else {
		struct xfs_dir3_icleaf_hdr leafhdr;
		struct xfs_dir2_leaf_entry *ents;

		leaf = (xfs_dir2_leaf_t *)oldroot;
		xfs_dir3_leaf_hdr_from_disk(&leafhdr, leaf);
		ents = xfs_dir3_leaf_ents_p(leaf);

		ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
		       leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
		size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
		level = 0;

		/*
		 * we are about to copy oldroot to bp, so set up the type
		 * of bp while we know exactly what it will be.
		 */
		xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
	}

	/*
	 * we can copy most of the information in the node from one block to
	 * another, but for CRC enabled headers we have to make sure that the
	 * block specific identifiers are kept intact. We update the buffer
	 * directly for this.
	 */
	memcpy(node, oldroot, size);
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;

		node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
	}
	xfs_trans_log_buf(tp, bp, 0, size - 1);

	bp->b_ops = blk1->bp->b_ops;
	xfs_trans_buf_copy_type(bp, blk1->bp);
	blk1->bp = bp;
	blk1->blkno = blkno;

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args,
		(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
		level + 1, &bp, args->whichfork);
	if (error)
		return error;

	node = bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	btree = xfs_da3_node_tree_p(node);
	btree[0].hashval = cpu_to_be32(blk1->hashval);
	btree[0].before = cpu_to_be32(blk1->blkno);
	btree[1].hashval = cpu_to_be32(blk2->hashval);
	btree[1].before = cpu_to_be32(blk2->blkno);
	nodehdr.count = 2;
	xfs_da3_node_hdr_to_disk(node, &nodehdr);

#ifdef DEBUG
	if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
	    oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
		ASSERT(blk1->blkno >= mp->m_dirleafblk &&
		       blk1->blkno < mp->m_dirfreeblk);
		ASSERT(blk2->blkno >= mp->m_dirleafblk &&
		       blk2->blkno < mp->m_dirfreeblk);
	}
#endif

	/* Header is already logged by xfs_da_node_create */
	xfs_trans_log_buf(tp, bp,
		XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));

	return 0;
}

/*
 * Split the node, rebalance, then add the new entry.
 */
STATIC int						/* error */
xfs_da3_node_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk,
	struct xfs_da_state_blk	*addblk,
	int			treelevel,
	int			*result)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	xfs_dablk_t		blkno;
	int			newcount;
	int			error;
	int			useextra;

	trace_xfs_da_node_split(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);

	/*
	 * With V2 dirs the extra block is data or freespace.
	 */
	useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
	newcount = 1 + useextra;
	/*
	 * Do we have to split the node?
	 */
	if (nodehdr.count + newcount > state->node_ents) {
		/*
		 * Allocate a new node, add to the doubly linked chain of
		 * nodes, then move some of our excess entries into it.
		 */
		error = xfs_da_grow_inode(state->args, &blkno);
		if (error)
			return(error);	/* GROT: dir is inconsistent */

		error = xfs_da3_node_create(state->args, blkno, treelevel,
					    &newblk->bp, state->args->whichfork);
		if (error)
			return(error);	/* GROT: dir is inconsistent */
		newblk->blkno = blkno;
		newblk->magic = XFS_DA_NODE_MAGIC;
		xfs_da3_node_rebalance(state, oldblk, newblk);
		error = xfs_da3_blk_link(state, oldblk, newblk);
		if (error)
			return(error);
		*result = 1;
	} else {
		*result = 0;
	}

	/*
	 * Insert the new entry(s) into the correct block
	 * (updating last hashval in the process).
	 *
	 * xfs_da3_node_add() inserts BEFORE the given index,
	 * and as a result of using node_lookup_int() we always
	 * point to a valid entry (not after one), but a split
	 * operation always results in a new block whose hashvals
	 * FOLLOW the current block.
	 *
	 * If we had double-split op below us, then add the extra block too.
	 */
	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	if (oldblk->index <= nodehdr.count) {
		oldblk->index++;
		xfs_da3_node_add(state, oldblk, addblk);
		if (useextra) {
			if (state->extraafter)
				oldblk->index++;
			xfs_da3_node_add(state, oldblk, &state->extrablk);
			state->extravalid = 0;
		}
	} else {
		newblk->index++;
		xfs_da3_node_add(state, newblk, addblk);
		if (useextra) {
			if (state->extraafter)
				newblk->index++;
			xfs_da3_node_add(state, newblk, &state->extrablk);
			state->extravalid = 0;
		}
	}

	return(0);
}

/*
 * Balance the btree elements between two intermediate nodes,
 * usually one full and one empty.
 *
 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
 */
STATIC void
xfs_da3_node_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_intnode	*node1;
	struct xfs_da_intnode	*node2;
	struct xfs_da_intnode	*tmpnode;
	struct xfs_da_node_entry *btree1;
	struct xfs_da_node_entry *btree2;
	struct xfs_da_node_entry *btree_s;
	struct xfs_da_node_entry *btree_d;
	struct xfs_da3_icnode_hdr nodehdr1;
	struct xfs_da3_icnode_hdr nodehdr2;
	struct xfs_trans	*tp;
	int			count;
	int			tmp;
	int			swap = 0;

	trace_xfs_da_node_rebalance(state->args);

	node1 = blk1->bp->b_addr;
	node2 = blk2->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
	xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
	btree1 = xfs_da3_node_tree_p(node1);
	btree2 = xfs_da3_node_tree_p(node2);

	/*
	 * Figure out how many entries need to move, and in which direction.
	 * Swap the nodes around if that makes it simpler.
	 */
	if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
	    ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
	     (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
			be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
		tmpnode = node1;
		node1 = node2;
		node2 = tmpnode;
		xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
		btree1 = xfs_da3_node_tree_p(node1);
		btree2 = xfs_da3_node_tree_p(node2);
		swap = 1;
	}

	count = (nodehdr1.count - nodehdr2.count) / 2;
	if (count == 0)
		return;
	tp = state->args->trans;
	/*
	 * Two cases: high-to-low and low-to-high.
	 */
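	/*
	 * count > 0: node1 is the fuller block, so shift its highest entries
	 * into the front of node2.  count < 0 is the mirror image, pulling
	 * entries off the front of node2 onto the end of node1.
	 */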
	if (count > 0) {
		/*
		 * Move elements in node2 up to make a hole.
		 */
		tmp = nodehdr2.count;
		if (tmp > 0) {
			tmp *= (uint)sizeof(xfs_da_node_entry_t);
			btree_s = &btree2[0];
			btree_d = &btree2[count];
			memmove(btree_d, btree_s, tmp);
		}

		/*
		 * Move the req'd B-tree elements from high in node1 to
		 * low in node2.
		 */
		nodehdr2.count += count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree1[nodehdr1.count - count];
		btree_d = &btree2[0];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count -= count;
	} else {
		/*
		 * Move the req'd B-tree elements from low in node2 to
		 * high in node1.
		 */
		count = -count;
		tmp = count * (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[0];
		btree_d = &btree1[nodehdr1.count];
		memcpy(btree_d, btree_s, tmp);
		nodehdr1.count += count;

		xfs_trans_log_buf(tp, blk1->bp,
			XFS_DA_LOGRANGE(node1, btree_d, tmp));

		/*
		 * Move elements in node2 down to fill the hole.
		 */
		tmp = nodehdr2.count - count;
		tmp *= (uint)sizeof(xfs_da_node_entry_t);
		btree_s = &btree2[count];
		btree_d = &btree2[0];
		memmove(btree_d, btree_s, tmp);
		nodehdr2.count -= count;
	}

	/*
	 * Log header of node 1 and all current bits of node 2.
	 */
	xfs_da3_node_hdr_to_disk(node1, &nodehdr1);
	xfs_trans_log_buf(tp, blk1->bp,
		XFS_DA_LOGRANGE(node1, &node1->hdr,
				xfs_da3_node_hdr_size(node1)));

	xfs_da3_node_hdr_to_disk(node2, &nodehdr2);
	xfs_trans_log_buf(tp, blk2->bp,
		XFS_DA_LOGRANGE(node2, &node2->hdr,
				xfs_da3_node_hdr_size(node2) +
				(sizeof(btree2[0]) * nodehdr2.count)));

	/*
	 * Record the last hashval from each block for upward propagation.
	 * (note: don't use the swapped node pointers)
	 */
	if (swap) {
		node1 = blk1->bp->b_addr;
		node2 = blk2->bp->b_addr;
		xfs_da3_node_hdr_from_disk(&nodehdr1, node1);
		xfs_da3_node_hdr_from_disk(&nodehdr2, node2);
		btree1 = xfs_da3_node_tree_p(node1);
		btree2 = xfs_da3_node_tree_p(node2);
	}
	blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 */
	if (blk1->index >= nodehdr1.count) {
		blk2->index = blk1->index - nodehdr1.count;
		blk1->index = nodehdr1.count + 1;	/* make it invalid */
	}
}

/*
 * Add a new entry to an intermediate node.
 */
STATIC void
xfs_da3_node_add(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	struct xfs_da_intnode	*node;
	struct xfs_da3_icnode_hdr nodehdr;
	struct xfs_da_node_entry *btree;
	int			tmp;

	trace_xfs_da_node_add(state->args);

	node = oldblk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	btree = xfs_da3_node_tree_p(node);

	ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
	ASSERT(newblk->blkno != 0);
	if (state->args->whichfork == XFS_DATA_FORK)
		ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
		       newblk->blkno < state->mp->m_dirfreeblk);

	/*
	 * We may need to make some room before we insert the new node.
	 */
	tmp = 0;
	if (oldblk->index < nodehdr.count) {
		tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
		memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
	}
	btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
	btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &btree[oldblk->index],
				tmp + sizeof(*btree)));

	nodehdr.count += 1;
	xfs_da3_node_hdr_to_disk(node, &nodehdr);
	xfs_trans_log_buf(state->args->trans, oldblk->bp,
		XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));

	/*
	 * Copy the last hash value from the oldblk to propagate upwards.
	 */
	oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}

/*========================================================================
 * Routines used for shrinking the Btree.
 *========================================================================*/

/*
 * Deallocate an empty leaf node, remove it from its parent,
 * possibly deallocating that block, etc...
 */
int
xfs_da3_join(
	struct xfs_da_state	*state)
{
	struct xfs_da_state_blk	*drop_blk;
	struct xfs_da_state_blk	*save_blk;
	int			action = 0;
	int			error;

	trace_xfs_da_join(state->args);

	drop_blk = &state->path.blk[ state->path.active-1 ];
	save_blk = &state->altpath.blk[ state->path.active-1 ];
	ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
	ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
	       drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);

	/*
	 * Walk back up the tree joining/deallocating as necessary.
	 * When we stop dropping blocks, break out.
	 */
	for ( ; state->path.active >= 2; drop_blk--, save_blk--,
		 state->path.active--) {
		/*
		 * See if we can combine the block with a neighbor.
		 *   (action == 0) => no options, just leave
		 *   (action == 1) => coalesce, then unlink
		 *   (action == 2) => block empty, unlink it
		 */
		switch (drop_blk->magic) {
		case XFS_ATTR_LEAF_MAGIC:
			error = xfs_attr3_leaf_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return(0);
			xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DIR2_LEAFN_MAGIC:
			error = xfs_dir2_leafn_toosmall(state, &action);
			if (error)
				return error;
			if (action == 0)
				return 0;
			xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
			break;
		case XFS_DA_NODE_MAGIC:
			/*
			 * Remove the offending node, fixup hashvals,
			 * check for a toosmall neighbor.
			 */
			xfs_da3_node_remove(state, drop_blk);
			xfs_da3_fixhashpath(state, &state->path);
			error = xfs_da3_node_toosmall(state, &action);
			if (error)
				return(error);
			if (action == 0)
				return 0;
			xfs_da3_node_unbalance(state, drop_blk, save_blk);
			break;
		}
		xfs_da3_fixhashpath(state, &state->altpath);
		error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
		xfs_da_state_kill_altpath(state);
		if (error)
			return(error);
		error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
							 drop_blk->bp);
		drop_blk->bp = NULL;
		if (error)
			return(error);
	}
	/*
	 * We joined all the way to the top.  If it turns out that
	 * we only have one entry in the root, make the child block
	 * the new root.
	 */
	xfs_da3_node_remove(state, drop_blk);
	xfs_da3_fixhashpath(state, &state->path);
	error = xfs_da3_root_join(state, &state->path.blk[0]);
	return(error);
}

#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
	__be16	magic = blkinfo->magic;

	if (level == 1) {
		ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
		       magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
	} else {
		ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
		       magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
	}
	ASSERT(!blkinfo->forw);
	ASSERT(!blkinfo->back);
}
#else	/* !DEBUG */
#define	xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif	/* !DEBUG */

/*
 * We have only one entry in the root.  Copy the only remaining child of
 * the old root to block 0 as the new root node.
 */
STATIC int
xfs_da3_root_join(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*root_blk)
{
	struct xfs_da_intnode	*oldroot;
	struct xfs_da_args	*args;
	xfs_dablk_t		child;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr oldroothdr;
	struct xfs_da_node_entry *btree;
	int			error;

	trace_xfs_da_root_join(state->args);

	ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);

	args = state->args;
	oldroot = root_blk->bp->b_addr;
	xfs_da3_node_hdr_from_disk(&oldroothdr, oldroot);
	ASSERT(oldroothdr.forw == 0);
	ASSERT(oldroothdr.back == 0);

	/*
	 * If the root has more than one child, then don't do anything.
	 */
	if (oldroothdr.count > 1)
		return 0;

	/*
	 * Read in the (only) child block, then copy those bytes into
	 * the root block's buffer and free the original child block.
	 */
	btree = xfs_da3_node_tree_p(oldroot);
	child = be32_to_cpu(btree[0].before);
	ASSERT(child != 0);
	error = xfs_da3_node_read(args->trans, args->dp, child, -1, &bp,
					     args->whichfork);
	if (error)
		return error;
	xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);

	/*
	 * This could be copying a leaf back into the root block in the case of
	 * there only being a single leaf block left in the tree. Hence we have
	 * to update the b_ops pointer as well to match the buffer type change
	 * that could occur. For dir3 blocks we also need to update the block
	 * number in the buffer header.
	 */
	memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
	root_blk->bp->b_ops = bp->b_ops;
	xfs_trans_buf_copy_type(root_blk->bp, bp);
	if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
		struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
		da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
	}
	xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
	error = xfs_da_shrink_inode(args, child, bp);
	return(error);
}

/*
 * Check a node block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 */
STATIC int
xfs_da3_node_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_da_intnode	*node;
	struct xfs_da_state_blk	*blk;
	struct xfs_da_blkinfo	*info;
	xfs_dablk_t		blkno;
	struct xfs_buf		*bp;
	struct xfs_da3_icnode_hdr nodehdr;
	int			count;
	int			forward;
	int			error;
	int			retval;
	int			i;

	trace_xfs_da_node_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	info = blk->bp->b_addr;
	node = (xfs_da_intnode_t *)info;
	xfs_da3_node_hdr_from_disk(&nodehdr, node);
	if (nodehdr.count > (state->node_ents >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return(0);	/* blk over 50%, don't try to join */
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (nodehdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (info->forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return(error);
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return(0);
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink a directory over time.
	 */
	count = state->node_ents;
	count -= state->node_ents >> 2;
	count -= nodehdr.count;
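	/*
	 * count is now 75% of the node capacity minus our own entry count,
	 * i.e. the number of entries a sibling may hold if the merged block
	 * is to keep at least 25% free space.
	 */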
1224
1225 /* start with smaller blk num */
1226 forward = nodehdr.forw < nodehdr.back;
1227 for (i = 0; i < 2; forward = !forward, i++) {
1228 struct xfs_da3_icnode_hdr thdr;
1229 if (forward)
1230 blkno = nodehdr.forw;
1231 else
1232 blkno = nodehdr.back;
1233 if (blkno == 0)
1234 continue;
1235 error = xfs_da3_node_read(state->args->trans, state->args->dp,
1236 blkno, -1, &bp, state->args->whichfork);
1237 if (error)
1238 return(error);
1239
1240 node = bp->b_addr;
1241 xfs_da3_node_hdr_from_disk(&thdr, node);
1242 xfs_trans_brelse(state->args->trans, bp);
1243
1244 if (count - thdr.count >= 0)
1245 break; /* fits with at least 25% to spare */
1246 }
1247 if (i >= 2) {
1248 *action = 0;
1249 return 0;
1250 }
1251
1252 /*
1253 * Make altpath point to the block we want to keep (the lower
1254 * numbered block) and path point to the block we want to drop.
1255 */
1256 memcpy(&state->altpath, &state->path, sizeof(state->path));
1257 if (blkno < blk->blkno) {
1258 error = xfs_da3_path_shift(state, &state->altpath, forward,
1259 0, &retval);
1260 } else {
1261 error = xfs_da3_path_shift(state, &state->path, forward,
1262 0, &retval);
1263 }
1264 if (error)
1265 return error;
1266 if (retval) {
1267 *action = 0;
1268 return 0;
1269 }
1270 *action = 1;
1271 return 0;
1272 }
1273
1274 /*
1275 * Pick up the last hashvalue from an intermediate node.
1276 */
1277 STATIC uint
1278 xfs_da3_node_lasthash(
1279 struct xfs_buf *bp,
1280 int *count)
1281 {
1282 struct xfs_da_intnode *node;
1283 struct xfs_da_node_entry *btree;
1284 struct xfs_da3_icnode_hdr nodehdr;
1285
1286 node = bp->b_addr;
1287 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1288 if (count)
1289 *count = nodehdr.count;
1290 if (!nodehdr.count)
1291 return 0;
1292 btree = xfs_da3_node_tree_p(node);
1293 return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1294 }
1295
1296 /*
1297 * Walk back up the tree adjusting hash values as necessary,
1298 * when we stop making changes, return.
1299 */
1300 void
1301 xfs_da3_fixhashpath(
1302 struct xfs_da_state *state,
1303 struct xfs_da_state_path *path)
1304 {
1305 struct xfs_da_state_blk *blk;
1306 struct xfs_da_intnode *node;
1307 struct xfs_da_node_entry *btree;
1308 xfs_dahash_t lasthash=0;
1309 int level;
1310 int count;
1311
1312 trace_xfs_da_fixhashpath(state->args);
1313
1314 level = path->active-1;
1315 blk = &path->blk[ level ];
1316 switch (blk->magic) {
1317 case XFS_ATTR_LEAF_MAGIC:
1318 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1319 if (count == 0)
1320 return;
1321 break;
1322 case XFS_DIR2_LEAFN_MAGIC:
1323 lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
1324 if (count == 0)
1325 return;
1326 break;
1327 case XFS_DA_NODE_MAGIC:
1328 lasthash = xfs_da3_node_lasthash(blk->bp, &count);
1329 if (count == 0)
1330 return;
1331 break;
1332 }
1333 for (blk--, level--; level >= 0; blk--, level--) {
1334 struct xfs_da3_icnode_hdr nodehdr;
1335
1336 node = blk->bp->b_addr;
1337 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1338 btree = xfs_da3_node_tree_p(node);
1339 if (be32_to_cpu(btree->hashval) == lasthash)
1340 break;
1341 blk->hashval = lasthash;
1342 btree[blk->index].hashval = cpu_to_be32(lasthash);
1343 xfs_trans_log_buf(state->args->trans, blk->bp,
1344 XFS_DA_LOGRANGE(node, &btree[blk->index],
1345 sizeof(*btree)));
1346
1347 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1348 }
1349 }
1350
1351 /*
1352 * Remove an entry from an intermediate node.
1353 */
1354 STATIC void
1355 xfs_da3_node_remove(
1356 struct xfs_da_state *state,
1357 struct xfs_da_state_blk *drop_blk)
1358 {
1359 struct xfs_da_intnode *node;
1360 struct xfs_da3_icnode_hdr nodehdr;
1361 struct xfs_da_node_entry *btree;
1362 int index;
1363 int tmp;
1364
1365 trace_xfs_da_node_remove(state->args);
1366
1367 node = drop_blk->bp->b_addr;
1368 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1369 ASSERT(drop_blk->index < nodehdr.count);
1370 ASSERT(drop_blk->index >= 0);
1371
1372 /*
1373 * Copy over the offending entry, or just zero it out.
1374 */
1375 index = drop_blk->index;
1376 btree = xfs_da3_node_tree_p(node);
1377 if (index < nodehdr.count - 1) {
1378 tmp = nodehdr.count - index - 1;
1379 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1380 memmove(&btree[index], &btree[index + 1], tmp);
1381 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1382 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1383 index = nodehdr.count - 1;
1384 }
1385 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1386 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1387 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1388 nodehdr.count -= 1;
1389 xfs_da3_node_hdr_to_disk(node, &nodehdr);
1390 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1391 XFS_DA_LOGRANGE(node, &node->hdr, xfs_da3_node_hdr_size(node)));
1392
1393 /*
1394 * Copy the last hash value from the block to propagate upwards.
1395 */
1396 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1397 }
1398
1399 /*
1400 * Unbalance the elements between two intermediate nodes,
1401 * move all Btree elements from one node into another.
1402 */
1403 STATIC void
1404 xfs_da3_node_unbalance(
1405 struct xfs_da_state *state,
1406 struct xfs_da_state_blk *drop_blk,
1407 struct xfs_da_state_blk *save_blk)
1408 {
1409 struct xfs_da_intnode *drop_node;
1410 struct xfs_da_intnode *save_node;
1411 struct xfs_da_node_entry *drop_btree;
1412 struct xfs_da_node_entry *save_btree;
1413 struct xfs_da3_icnode_hdr drop_hdr;
1414 struct xfs_da3_icnode_hdr save_hdr;
1415 struct xfs_trans *tp;
1416 int sindex;
1417 int tmp;
1418
1419 trace_xfs_da_node_unbalance(state->args);
1420
1421 drop_node = drop_blk->bp->b_addr;
1422 save_node = save_blk->bp->b_addr;
1423 xfs_da3_node_hdr_from_disk(&drop_hdr, drop_node);
1424 xfs_da3_node_hdr_from_disk(&save_hdr, save_node);
1425 drop_btree = xfs_da3_node_tree_p(drop_node);
1426 save_btree = xfs_da3_node_tree_p(save_node);
1427 tp = state->args->trans;
1428
1429 /*
1430 * If the dying block has lower hashvals, then move all the
1431 * elements in the remaining block up to make a hole.
1432 */
1433 if ((be32_to_cpu(drop_btree[0].hashval) <
1434 be32_to_cpu(save_btree[0].hashval)) ||
1435 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1436 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1437 /* XXX: check this - is memmove dst correct? */
1438 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1439 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1440
1441 sindex = 0;
1442 xfs_trans_log_buf(tp, save_blk->bp,
1443 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1444 (save_hdr.count + drop_hdr.count) *
1445 sizeof(xfs_da_node_entry_t)));
1446 } else {
1447 sindex = save_hdr.count;
1448 xfs_trans_log_buf(tp, save_blk->bp,
1449 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1450 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1451 }
1452
1453 /*
1454 * Move all the B-tree elements from drop_blk to save_blk.
1455 */
1456 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1457 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1458 save_hdr.count += drop_hdr.count;
1459
1460 xfs_da3_node_hdr_to_disk(save_node, &save_hdr);
1461 xfs_trans_log_buf(tp, save_blk->bp,
1462 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1463 xfs_da3_node_hdr_size(save_node)));
1464
1465 /*
1466 * Save the last hashval in the remaining block for upward propagation.
1467 */
1468 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1469 }
1470
1471 /*========================================================================
1472 * Routines used for finding things in the Btree.
1473 *========================================================================*/
1474
1475 /*
1476 * Walk down the Btree looking for a particular filename, filling
1477 * in the state structure as we go.
1478 *
1479 * We will set the state structure to point to each of the elements
1480 * in each of the nodes where either the hashval is or should be.
1481 *
1482 * We support duplicate hashval's so for each entry in the current
1483 * node that could contain the desired hashval, descend. This is a
1484 * pruned depth-first tree search.
1485 */
1486 int /* error */
1487 xfs_da3_node_lookup_int(
1488 struct xfs_da_state *state,
1489 int *result)
1490 {
1491 struct xfs_da_state_blk *blk;
1492 struct xfs_da_blkinfo *curr;
1493 struct xfs_da_intnode *node;
1494 struct xfs_da_node_entry *btree;
1495 struct xfs_da3_icnode_hdr nodehdr;
1496 struct xfs_da_args *args;
1497 xfs_dablk_t blkno;
1498 xfs_dahash_t hashval;
1499 xfs_dahash_t btreehashval;
1500 int probe;
1501 int span;
1502 int max;
1503 int error;
1504 int retval;
1505
1506 args = state->args;
1507
1508 /*
1509 * Descend thru the B-tree searching each level for the right
1510 * node to use, until the right hashval is found.
1511 */
1512 blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1513 for (blk = &state->path.blk[0], state->path.active = 1;
1514 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1515 blk++, state->path.active++) {
1516 /*
1517 * Read the next node down in the tree.
1518 */
1519 blk->blkno = blkno;
1520 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1521 -1, &blk->bp, args->whichfork);
1522 if (error) {
1523 blk->blkno = 0;
1524 state->path.active--;
1525 return(error);
1526 }
1527 curr = blk->bp->b_addr;
1528 blk->magic = be16_to_cpu(curr->magic);
1529
1530 if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
1531 blk->magic == XFS_ATTR3_LEAF_MAGIC) {
1532 blk->magic = XFS_ATTR_LEAF_MAGIC;
1533 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1534 break;
1535 }
1536
1537 if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1538 blk->magic == XFS_DIR3_LEAFN_MAGIC) {
1539 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1540 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1541 break;
1542 }
1543
1544 blk->magic = XFS_DA_NODE_MAGIC;
1545
1546
1547 /*
1548 * Search an intermediate node for a match.
1549 */
1550 node = blk->bp->b_addr;
1551 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1552 btree = xfs_da3_node_tree_p(node);
1553
1554 max = nodehdr.count;
1555 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1556
1557 /*
1558 * Binary search. (note: small blocks will skip loop)
1559 */
1560 probe = span = max / 2;
1561 hashval = args->hashval;
1562 while (span > 4) {
1563 span /= 2;
1564 btreehashval = be32_to_cpu(btree[probe].hashval);
1565 if (btreehashval < hashval)
1566 probe += span;
1567 else if (btreehashval > hashval)
1568 probe -= span;
1569 else
1570 break;
1571 }
1572 ASSERT((probe >= 0) && (probe < max));
1573 ASSERT((span <= 4) ||
1574 (be32_to_cpu(btree[probe].hashval) == hashval));
1575
1576 /*
1577 * Since we may have duplicate hashval's, find the first
1578 * matching hashval in the node.
1579 */
1580 while (probe > 0 &&
1581 be32_to_cpu(btree[probe].hashval) >= hashval) {
1582 probe--;
1583 }
1584 while (probe < max &&
1585 be32_to_cpu(btree[probe].hashval) < hashval) {
1586 probe++;
1587 }
1588
1589 /*
1590 * Pick the right block to descend on.
1591 */
1592 if (probe == max) {
1593 blk->index = max - 1;
1594 blkno = be32_to_cpu(btree[max - 1].before);
1595 } else {
1596 blk->index = probe;
1597 blkno = be32_to_cpu(btree[probe].before);
1598 }
1599 }
1600
1601 /*
1602 * A leaf block that ends in the hashval that we are interested in
1603 * (final hashval == search hashval) means that the next block may
1604 * contain more entries with the same hashval, shift upward to the
1605 * next leaf and keep searching.
1606 */
1607 for (;;) {
1608 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1609 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1610 &blk->index, state);
1611 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1612 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1613 blk->index = args->index;
1614 args->blkno = blk->blkno;
1615 } else {
1616 ASSERT(0);
1617 return XFS_ERROR(EFSCORRUPTED);
1618 }
1619 if (((retval == ENOENT) || (retval == ENOATTR)) &&
1620 (blk->hashval == args->hashval)) {
1621 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1622 &retval);
1623 if (error)
1624 return(error);
1625 if (retval == 0) {
1626 continue;
1627 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1628 /* path_shift() gives ENOENT */
1629 retval = XFS_ERROR(ENOATTR);
1630 }
1631 }
1632 break;
1633 }
1634 *result = retval;
1635 return(0);
1636 }
1637
1638 /*========================================================================
1639 * Utility routines.
1640 *========================================================================*/
1641
1642 /*
1643 * Compare two intermediate nodes for "order".
1644 */
1645 STATIC int
1646 xfs_da3_node_order(
1647 struct xfs_buf *node1_bp,
1648 struct xfs_buf *node2_bp)
1649 {
1650 struct xfs_da_intnode *node1;
1651 struct xfs_da_intnode *node2;
1652 struct xfs_da_node_entry *btree1;
1653 struct xfs_da_node_entry *btree2;
1654 struct xfs_da3_icnode_hdr node1hdr;
1655 struct xfs_da3_icnode_hdr node2hdr;
1656
1657 node1 = node1_bp->b_addr;
1658 node2 = node2_bp->b_addr;
1659 xfs_da3_node_hdr_from_disk(&node1hdr, node1);
1660 xfs_da3_node_hdr_from_disk(&node2hdr, node2);
1661 btree1 = xfs_da3_node_tree_p(node1);
1662 btree2 = xfs_da3_node_tree_p(node2);
1663
1664 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1665 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1666 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1667 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1668 return 1;
1669 }
1670 return 0;
1671 }
1672
1673 /*
1674 * Link a new block into a doubly linked list of blocks (of whatever type).
1675 */
1676 int /* error */
1677 xfs_da3_blk_link(
1678 struct xfs_da_state *state,
1679 struct xfs_da_state_blk *old_blk,
1680 struct xfs_da_state_blk *new_blk)
1681 {
1682 struct xfs_da_blkinfo *old_info;
1683 struct xfs_da_blkinfo *new_info;
1684 struct xfs_da_blkinfo *tmp_info;
1685 struct xfs_da_args *args;
1686 struct xfs_buf *bp;
1687 int before = 0;
1688 int error;
1689
1690 /*
1691 * Set up environment.
1692 */
1693 args = state->args;
1694 ASSERT(args != NULL);
1695 old_info = old_blk->bp->b_addr;
1696 new_info = new_blk->bp->b_addr;
1697 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1698 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1699 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1700
1701 switch (old_blk->magic) {
1702 case XFS_ATTR_LEAF_MAGIC:
1703 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1704 break;
1705 case XFS_DIR2_LEAFN_MAGIC:
1706 before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1707 break;
1708 case XFS_DA_NODE_MAGIC:
1709 before = xfs_da3_node_order(old_blk->bp, new_blk->bp);
1710 break;
1711 }
1712
1713 /*
1714 * Link blocks in appropriate order.
1715 */
1716 if (before) {
1717 /*
1718 * Link new block in before existing block.
1719 */
1720 trace_xfs_da_link_before(args);
1721 new_info->forw = cpu_to_be32(old_blk->blkno);
1722 new_info->back = old_info->back;
1723 if (old_info->back) {
1724 error = xfs_da3_node_read(args->trans, args->dp,
1725 be32_to_cpu(old_info->back),
1726 -1, &bp, args->whichfork);
1727 if (error)
1728 return(error);
1729 ASSERT(bp != NULL);
1730 tmp_info = bp->b_addr;
1731 ASSERT(tmp_info->magic == old_info->magic);
1732 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1733 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1734 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1735 }
1736 old_info->back = cpu_to_be32(new_blk->blkno);
1737 } else {
1738 /*
1739 * Link new block in after existing block.
1740 */
1741 trace_xfs_da_link_after(args);
1742 new_info->forw = old_info->forw;
1743 new_info->back = cpu_to_be32(old_blk->blkno);
1744 if (old_info->forw) {
1745 error = xfs_da3_node_read(args->trans, args->dp,
1746 be32_to_cpu(old_info->forw),
1747 -1, &bp, args->whichfork);
1748 if (error)
1749 return(error);
1750 ASSERT(bp != NULL);
1751 tmp_info = bp->b_addr;
1752 ASSERT(tmp_info->magic == old_info->magic);
1753 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1754 tmp_info->back = cpu_to_be32(new_blk->blkno);
1755 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1756 }
1757 old_info->forw = cpu_to_be32(new_blk->blkno);
1758 }
1759
1760 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1761 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1762 return(0);
1763 }
1764
1765 /*
1766 * Unlink a block from a doubly linked list of blocks.
1767 */
1768 STATIC int /* error */
1769 xfs_da3_blk_unlink(
1770 struct xfs_da_state *state,
1771 struct xfs_da_state_blk *drop_blk,
1772 struct xfs_da_state_blk *save_blk)
1773 {
1774 struct xfs_da_blkinfo *drop_info;
1775 struct xfs_da_blkinfo *save_info;
1776 struct xfs_da_blkinfo *tmp_info;
1777 struct xfs_da_args *args;
1778 struct xfs_buf *bp;
1779 int error;
1780
1781 /*
1782 * Set up environment.
1783 */
1784 args = state->args;
1785 ASSERT(args != NULL);
1786 save_info = save_blk->bp->b_addr;
1787 drop_info = drop_blk->bp->b_addr;
1788 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1789 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1790 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1791 ASSERT(save_blk->magic == drop_blk->magic);
1792 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1793 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1794 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1795 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1796
1797 /*
1798 * Unlink the leaf block from the doubly linked chain of leaves.
1799 */
1800 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1801 trace_xfs_da_unlink_back(args);
1802 save_info->back = drop_info->back;
1803 if (drop_info->back) {
1804 error = xfs_da3_node_read(args->trans, args->dp,
1805 be32_to_cpu(drop_info->back),
1806 -1, &bp, args->whichfork);
1807 if (error)
1808 return(error);
1809 ASSERT(bp != NULL);
1810 tmp_info = bp->b_addr;
1811 ASSERT(tmp_info->magic == save_info->magic);
1812 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1813 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1814 xfs_trans_log_buf(args->trans, bp, 0,
1815 sizeof(*tmp_info) - 1);
1816 }
1817 } else {
1818 trace_xfs_da_unlink_forward(args);
1819 save_info->forw = drop_info->forw;
1820 if (drop_info->forw) {
1821 error = xfs_da3_node_read(args->trans, args->dp,
1822 be32_to_cpu(drop_info->forw),
1823 -1, &bp, args->whichfork);
1824 if (error)
1825 return(error);
1826 ASSERT(bp != NULL);
1827 tmp_info = bp->b_addr;
1828 ASSERT(tmp_info->magic == save_info->magic);
1829 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1830 tmp_info->back = cpu_to_be32(save_blk->blkno);
1831 xfs_trans_log_buf(args->trans, bp, 0,
1832 sizeof(*tmp_info) - 1);
1833 }
1834 }
1835
1836 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1837 return(0);
1838 }
1839
1840 /*
1841 * Move a path "forward" or "!forward" one block at the current level.
1842 *
1843 * This routine will adjust a "path" to point to the next block
1844 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1845 * Btree, including updating pointers to the intermediate nodes between
1846 * the new bottom and the root.
1847 */
1848 int /* error */
1849 xfs_da3_path_shift(
1850 struct xfs_da_state *state,
1851 struct xfs_da_state_path *path,
1852 int forward,
1853 int release,
1854 int *result)
1855 {
1856 struct xfs_da_state_blk *blk;
1857 struct xfs_da_blkinfo *info;
1858 struct xfs_da_intnode *node;
1859 struct xfs_da_args *args;
1860 struct xfs_da_node_entry *btree;
1861 struct xfs_da3_icnode_hdr nodehdr;
1862 xfs_dablk_t blkno = 0;
1863 int level;
1864 int error;
1865
1866 trace_xfs_da_path_shift(state->args);
1867
1868 /*
1869 * Roll up the Btree looking for the first block where our
1870 * current index is not at the edge of the block. Note that
1871 * we skip the bottom layer because we want the sibling block.
1872 */
1873 args = state->args;
1874 ASSERT(args != NULL);
1875 ASSERT(path != NULL);
1876 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1877 level = (path->active-1) - 1; /* skip bottom layer in path */
1878 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1879 node = blk->bp->b_addr;
1880 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1881 btree = xfs_da3_node_tree_p(node);
1882
1883 if (forward && (blk->index < nodehdr.count - 1)) {
1884 blk->index++;
1885 blkno = be32_to_cpu(btree[blk->index].before);
1886 break;
1887 } else if (!forward && (blk->index > 0)) {
1888 blk->index--;
1889 blkno = be32_to_cpu(btree[blk->index].before);
1890 break;
1891 }
1892 }
1893 if (level < 0) {
1894 *result = XFS_ERROR(ENOENT); /* we're out of our tree */
1895 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1896 return 0;
1897 }
1898
1899 /*
1900 * Roll down the edge of the subtree until we reach the
1901 * same depth we were at originally.
1902 */
1903 for (blk++, level++; level < path->active; blk++, level++) {
1904 /*
1905 * Release the old block.
1906 * (if it's dirty, the transaction won't actually let it go)
1907 */
1908 if (release)
1909 xfs_trans_brelse(args->trans, blk->bp);
1910
1911 /*
1912 * Read the next child block.
1913 */
1914 blk->blkno = blkno;
1915 error = xfs_da3_node_read(args->trans, args->dp, blkno, -1,
1916 &blk->bp, args->whichfork);
1917 if (error)
1918 return error;
1919 info = blk->bp->b_addr;
1920 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1921 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1922 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1923 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1924 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1925 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1926
1928 /*
1929 * Note: we flatten the magic number to a single type so we
1930 * don't have to compare against crc/non-crc types elsewhere.
1931 */
1932 switch (be16_to_cpu(info->magic)) {
1933 case XFS_DA_NODE_MAGIC:
1934 case XFS_DA3_NODE_MAGIC:
1935 blk->magic = XFS_DA_NODE_MAGIC;
1936 node = (xfs_da_intnode_t *)info;
1937 xfs_da3_node_hdr_from_disk(&nodehdr, node);
1938 btree = xfs_da3_node_tree_p(node);
1939 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1940 if (forward)
1941 blk->index = 0;
1942 else
1943 blk->index = nodehdr.count - 1;
1944 blkno = be32_to_cpu(btree[blk->index].before);
1945 break;
1946 case XFS_ATTR_LEAF_MAGIC:
1947 case XFS_ATTR3_LEAF_MAGIC:
1948 blk->magic = XFS_ATTR_LEAF_MAGIC;
1949 ASSERT(level == path->active-1);
1950 blk->index = 0;
1951 blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1952 NULL);
1953 break;
1954 case XFS_DIR2_LEAFN_MAGIC:
1955 case XFS_DIR3_LEAFN_MAGIC:
1956 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1957 ASSERT(level == path->active-1);
1958 blk->index = 0;
1959 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
1960 NULL);
1961 break;
1962 default:
1963 ASSERT(0);
1964 break;
1965 }
1966 }
1967 *result = 0;
1968 return 0;
1969 }
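
/*
 * A sketch of typical caller usage (hypothetical fragment, assuming a
 * filled-in state and path): walk sideways through the leaf level by
 * shifting the path forward until the ENOENT result says we ran off
 * the edge of the tree.  Illustrative only, kept out of the build:
 */
#if 0
	for (;;) {
		error = xfs_da3_path_shift(state, &state->path, 1, 1,
					   &result);
		if (error)
			return error;
		if (result)
			break;	/* no more blocks at this level */
		/* process state->path.blk[state->path.active - 1] here */
	}
#endif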
1970
1971
1972 /*========================================================================
1973 * Utility routines.
1974 *========================================================================*/
1975
1976 /*
1977 * Implement a simple hash on a character string.
1978 * Rotate the hash value by 7 bits, then XOR each character in.
1979 * This is implemented with some source-level loop unrolling.
1980 */
1981 xfs_dahash_t
1982 xfs_da_hashname(const __uint8_t *name, int namelen)
1983 {
1984 xfs_dahash_t hash;
1985
1986 /*
1987 * Do four characters at a time as long as we can.
1988 */
1989 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1990 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1991 (name[3] << 0) ^ rol32(hash, 7 * 4);
1992
1993 /*
1994 * Now do the rest of the characters.
1995 */
1996 switch (namelen) {
1997 case 3:
1998 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1999 rol32(hash, 7 * 3);
2000 case 2:
2001 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2002 case 1:
2003 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2004 default: /* case 0: */
2005 return hash;
2006 }
2007 }
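
/*
 * For reference, the unrolled hash above is equivalent to this
 * byte-at-a-time loop.  Illustrative sketch only (hypothetical helper,
 * kept out of the build):
 */
#if 0
static xfs_dahash_t
xfs_da_hashname_simple(const __uint8_t *name, int namelen)
{
	xfs_dahash_t hash = 0;

	while (namelen--)
		hash = *name++ ^ rol32(hash, 7);
	return hash;
}
#endif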
2008
2009 enum xfs_dacmp
2010 xfs_da_compname(
2011 struct xfs_da_args *args,
2012 const unsigned char *name,
2013 int len)
2014 {
2015 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2016 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2017 }
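
/*
 * A hedged sketch of an alternative ->compname, modelled on the ASCII
 * case-insensitive directory lookups elsewhere in XFS, where
 * XFS_CMP_CASE reports a case-equivalent but non-exact match.
 * Hypothetical example, not part of this file:
 */
#if 0
static enum xfs_dacmp
xfs_example_ci_compname(
	struct xfs_da_args	*args,
	const unsigned char	*name,
	int			len)
{
	int			i;

	if (args->namelen != len)
		return XFS_CMP_DIFFERENT;
	if (memcmp(args->name, name, len) == 0)
		return XFS_CMP_EXACT;
	for (i = 0; i < len; i++) {
		if (tolower(args->name[i]) != tolower(name[i]))
			return XFS_CMP_DIFFERENT;
	}
	return XFS_CMP_CASE;
}
#endif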
2018
2019 static xfs_dahash_t
2020 xfs_default_hashname(
2021 struct xfs_name *name)
2022 {
2023 return xfs_da_hashname(name->name, name->len);
2024 }
2025
2026 const struct xfs_nameops xfs_default_nameops = {
2027 .hashname = xfs_default_hashname,
2028 .compname = xfs_da_compname
2029 };
2030
2031 int
2032 xfs_da_grow_inode_int(
2033 struct xfs_da_args *args,
2034 xfs_fileoff_t *bno,
2035 int count)
2036 {
2037 struct xfs_trans *tp = args->trans;
2038 struct xfs_inode *dp = args->dp;
2039 int w = args->whichfork;
2040 xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
2041 struct xfs_bmbt_irec map, *mapp;
2042 int nmap, error, got, i, mapi;
2043
2044 /*
2045 * Find a spot in the file space to put the new block.
2046 */
2047 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2048 if (error)
2049 return error;
2050
2051 /*
2052 * Try mapping it in one filesystem block.
2053 */
2054 nmap = 1;
2055 ASSERT(args->firstblock != NULL);
2056 error = xfs_bmapi_write(tp, dp, *bno, count,
2057 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2058 args->firstblock, args->total, &map, &nmap,
2059 args->flist);
2060 if (error)
2061 return error;
2062
2063 ASSERT(nmap <= 1);
2064 if (nmap == 1) {
2065 mapp = &map;
2066 mapi = 1;
2067 } else if (nmap == 0 && count > 1) {
2068 xfs_fileoff_t b;
2069 int c;
2070
2071 /*
2072 * If we didn't get it and the block might work if fragmented,
2073 * try without the CONTIG flag. Loop until we get it all.
2074 */
2075 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2076 for (b = *bno, mapi = 0; b < *bno + count; ) {
2077 nmap = MIN(XFS_BMAP_MAX_NMAP, count);
2078 c = (int)(*bno + count - b);
2079 error = xfs_bmapi_write(tp, dp, b, c,
2080 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2081 args->firstblock, args->total,
2082 &mapp[mapi], &nmap, args->flist);
2083 if (error)
2084 goto out_free_map;
2085 if (nmap < 1)
2086 break;
2087 mapi += nmap;
2088 b = mapp[mapi - 1].br_startoff +
2089 mapp[mapi - 1].br_blockcount;
2090 }
2091 } else {
2092 mapi = 0;
2093 mapp = NULL;
2094 }
2095
2096 /*
2097 * Count the blocks we got and make sure they exactly tile the requested range.
2098 */
2099 for (i = 0, got = 0; i < mapi; i++)
2100 got += mapp[i].br_blockcount;
2101 if (got != count || mapp[0].br_startoff != *bno ||
2102 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2103 *bno + count) {
2104 error = XFS_ERROR(ENOSPC);
2105 goto out_free_map;
2106 }
2107
2108 /* account for newly allocated blocks in reserved blocks total */
2109 args->total -= dp->i_d.di_nblocks - nblks;
2110
2111 out_free_map:
2112 if (mapp != &map)
2113 kmem_free(mapp);
2114 return error;
2115 }
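
/*
 * Worked example for the fallback path above: with count == 4 and no
 * contiguous 4-block extent available, the XFS_BMAPI_CONTIG attempt
 * comes back with nmap == 0, and the loop might map the range as, say,
 * 2 + 1 + 1 blocks over three xfs_bmapi_write() calls.  The final
 * check still requires that the pieces tile [*bno, *bno + count)
 * exactly, otherwise the whole allocation fails with ENOSPC.
 */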
2116
2117 /*
2118 * Add a block to the btree, allocated at the first unused offset in the file.
2119 * Return the new block number to the caller.
2120 */
2121 int
2122 xfs_da_grow_inode(
2123 struct xfs_da_args *args,
2124 xfs_dablk_t *new_blkno)
2125 {
2126 xfs_fileoff_t bno;
2127 int count;
2128 int error;
2129
2130 trace_xfs_da_grow_inode(args);
2131
2132 if (args->whichfork == XFS_DATA_FORK) {
2133 bno = args->dp->i_mount->m_dirleafblk;
2134 count = args->dp->i_mount->m_dirblkfsbs;
2135 } else {
2136 bno = 0;
2137 count = 1;
2138 }
2139
2140 error = xfs_da_grow_inode_int(args, &bno, count);
2141 if (!error)
2142 *new_blkno = (xfs_dablk_t)bno;
2143 return error;
2144 }
2145
2146 /*
2147 * Ick. We need to always be able to remove a btree block, even
2148 * if there's no space reservation because the filesystem is full.
2149 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2150 * It swaps the target block with the last block in the file, because
2151 * the last block in the file can always be removed: unmapping it
2152 * cannot cause a bmap btree split.
2153 */
2154 STATIC int
2155 xfs_da3_swap_lastblock(
2156 struct xfs_da_args *args,
2157 xfs_dablk_t *dead_blknop,
2158 struct xfs_buf **dead_bufp)
2159 {
2160 struct xfs_da_blkinfo *dead_info;
2161 struct xfs_da_blkinfo *sib_info;
2162 struct xfs_da_intnode *par_node;
2163 struct xfs_da_intnode *dead_node;
2164 struct xfs_dir2_leaf *dead_leaf2;
2165 struct xfs_da_node_entry *btree;
2166 struct xfs_da3_icnode_hdr par_hdr;
2167 struct xfs_inode *ip;
2168 struct xfs_trans *tp;
2169 struct xfs_mount *mp;
2170 struct xfs_buf *dead_buf;
2171 struct xfs_buf *last_buf;
2172 struct xfs_buf *sib_buf;
2173 struct xfs_buf *par_buf;
2174 xfs_dahash_t dead_hash;
2175 xfs_fileoff_t lastoff;
2176 xfs_dablk_t dead_blkno;
2177 xfs_dablk_t last_blkno;
2178 xfs_dablk_t sib_blkno;
2179 xfs_dablk_t par_blkno;
2180 int error;
2181 int w;
2182 int entno;
2183 int level;
2184 int dead_level;
2185
2186 trace_xfs_da_swap_lastblock(args);
2187
2188 dead_buf = *dead_bufp;
2189 dead_blkno = *dead_blknop;
2190 tp = args->trans;
2191 ip = args->dp;
2192 w = args->whichfork;
2193 ASSERT(w == XFS_DATA_FORK);
2194 mp = ip->i_mount;
2195 lastoff = mp->m_dirfreeblk;
2196 error = xfs_bmap_last_before(tp, ip, &lastoff, w);
2197 if (error)
2198 return error;
2199 if (unlikely(lastoff == 0)) {
2200 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2201 mp);
2202 return XFS_ERROR(EFSCORRUPTED);
2203 }
2204 /*
2205 * Read the last block in the btree space.
2206 */
2207 last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
2208 error = xfs_da3_node_read(tp, ip, last_blkno, -1, &last_buf, w);
2209 if (error)
2210 return error;
2211 /*
2212 * Copy the last block into the dead buffer and log it.
2213 */
2214 memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
2215 xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
2216 dead_info = dead_buf->b_addr;
2217 /*
2218 * Get values from the moved block.
2219 */
2220 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2221 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2222 struct xfs_dir3_icleaf_hdr leafhdr;
2223 struct xfs_dir2_leaf_entry *ents;
2224
2225 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2226 xfs_dir3_leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2227 ents = xfs_dir3_leaf_ents_p(dead_leaf2);
2228 dead_level = 0;
2229 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2230 } else {
2231 struct xfs_da3_icnode_hdr deadhdr;
2232
2233 dead_node = (xfs_da_intnode_t *)dead_info;
2234 xfs_da3_node_hdr_from_disk(&deadhdr, dead_node);
2235 btree = xfs_da3_node_tree_p(dead_node);
2236 dead_level = deadhdr.level;
2237 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2238 }
2239 sib_buf = par_buf = NULL;
2240 /*
2241 * If the moved block has a left sibling, fix up the pointers.
2242 */
2243 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2244 error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
2245 if (error)
2246 goto done;
2247 sib_info = sib_buf->b_addr;
2248 if (unlikely(
2249 be32_to_cpu(sib_info->forw) != last_blkno ||
2250 sib_info->magic != dead_info->magic)) {
2251 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2252 XFS_ERRLEVEL_LOW, mp);
2253 error = XFS_ERROR(EFSCORRUPTED);
2254 goto done;
2255 }
2256 sib_info->forw = cpu_to_be32(dead_blkno);
2257 xfs_trans_log_buf(tp, sib_buf,
2258 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2259 sizeof(sib_info->forw)));
2260 sib_buf = NULL;
2261 }
2262 /*
2263 * If the moved block has a right sibling, fix up the pointers.
2264 */
2265 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2266 error = xfs_da3_node_read(tp, ip, sib_blkno, -1, &sib_buf, w);
2267 if (error)
2268 goto done;
2269 sib_info = sib_buf->b_addr;
2270 if (unlikely(
2271 be32_to_cpu(sib_info->back) != last_blkno ||
2272 sib_info->magic != dead_info->magic)) {
2273 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
2274 XFS_ERRLEVEL_LOW, mp);
2275 error = XFS_ERROR(EFSCORRUPTED);
2276 goto done;
2277 }
2278 sib_info->back = cpu_to_be32(dead_blkno);
2279 xfs_trans_log_buf(tp, sib_buf,
2280 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2281 sizeof(sib_info->back)));
2282 sib_buf = NULL;
2283 }
2284 par_blkno = mp->m_dirleafblk;
2285 level = -1;
2286 /*
2287 * Walk down the tree looking for the parent of the moved block.
2288 */
2289 for (;;) {
2290 error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
2291 if (error)
2292 goto done;
2293 par_node = par_buf->b_addr;
2294 xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
2295 if (level >= 0 && level != par_hdr.level + 1) {
2296 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
2297 XFS_ERRLEVEL_LOW, mp);
2298 error = XFS_ERROR(EFSCORRUPTED);
2299 goto done;
2300 }
2301 level = par_hdr.level;
2302 btree = xfs_da3_node_tree_p(par_node);
2303 for (entno = 0;
2304 entno < par_hdr.count &&
2305 be32_to_cpu(btree[entno].hashval) < dead_hash;
2306 entno++)
2307 continue;
2308 if (entno == par_hdr.count) {
2309 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
2310 XFS_ERRLEVEL_LOW, mp);
2311 error = XFS_ERROR(EFSCORRUPTED);
2312 goto done;
2313 }
2314 par_blkno = be32_to_cpu(btree[entno].before);
2315 if (level == dead_level + 1)
2316 break;
2317 xfs_trans_brelse(tp, par_buf);
2318 par_buf = NULL;
2319 }
2320 /*
2321 * We're in the right parent block.
2322 * Look for the right entry.
2323 */
2324 for (;;) {
2325 for (;
2326 entno < par_hdr.count &&
2327 be32_to_cpu(btree[entno].before) != last_blkno;
2328 entno++)
2329 continue;
2330 if (entno < par_hdr.count)
2331 break;
2332 par_blkno = par_hdr.forw;
2333 xfs_trans_brelse(tp, par_buf);
2334 par_buf = NULL;
2335 if (unlikely(par_blkno == 0)) {
2336 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
2337 XFS_ERRLEVEL_LOW, mp);
2338 error = XFS_ERROR(EFSCORRUPTED);
2339 goto done;
2340 }
2341 error = xfs_da3_node_read(tp, ip, par_blkno, -1, &par_buf, w);
2342 if (error)
2343 goto done;
2344 par_node = par_buf->b_addr;
2345 xfs_da3_node_hdr_from_disk(&par_hdr, par_node);
2346 if (par_hdr.level != level) {
2347 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
2348 XFS_ERRLEVEL_LOW, mp);
2349 error = XFS_ERROR(EFSCORRUPTED);
2350 goto done;
2351 }
2352 btree = xfs_da3_node_tree_p(par_node);
2353 entno = 0;
2354 }
2355 /*
2356 * Update the parent entry pointing to the moved block.
2357 */
2358 btree[entno].before = cpu_to_be32(dead_blkno);
2359 xfs_trans_log_buf(tp, par_buf,
2360 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2361 sizeof(btree[entno].before)));
2362 *dead_blknop = last_blkno;
2363 *dead_bufp = last_buf;
2364 return 0;
2365 done:
2366 if (par_buf)
2367 xfs_trans_brelse(tp, par_buf);
2368 if (sib_buf)
2369 xfs_trans_brelse(tp, sib_buf);
2370 xfs_trans_brelse(tp, last_buf);
2371 return error;
2372 }
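
/*
 * Recap of the swap above: the last block L is copied over the dead
 * block D, L's siblings and its parent entry are repointed at D's
 * address, and L's block number is handed back so the caller unmaps L
 * instead; removing the last block never splits the bmap btree.
 */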
2373
2374 /*
2375 * Remove a btree block from a directory or attribute.
2376 */
2377 int
2378 xfs_da_shrink_inode(
2379 xfs_da_args_t *args,
2380 xfs_dablk_t dead_blkno,
2381 struct xfs_buf *dead_buf)
2382 {
2383 xfs_inode_t *dp;
2384 int done, error, w, count;
2385 xfs_trans_t *tp;
2386 xfs_mount_t *mp;
2387
2388 trace_xfs_da_shrink_inode(args);
2389
2390 dp = args->dp;
2391 w = args->whichfork;
2392 tp = args->trans;
2393 mp = dp->i_mount;
2394 if (w == XFS_DATA_FORK)
2395 count = mp->m_dirblkfsbs;
2396 else
2397 count = 1;
2398 for (;;) {
2399 /*
2400 * Remove extents. If we get ENOSPC for a dir we have to move
2401 * the last block to the place we want to kill.
2402 */
2403 error = xfs_bunmapi(tp, dp, dead_blkno, count,
2404 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2405 0, args->firstblock, args->flist, &done);
2406 if (error == ENOSPC) {
2407 if (w != XFS_DATA_FORK)
2408 break;
2409 error = xfs_da3_swap_lastblock(args, &dead_blkno,
2410 &dead_buf);
2411 if (error)
2412 break;
2413 } else {
2414 break;
2415 }
2416 }
2417 xfs_trans_binval(tp, dead_buf);
2418 return error;
2419 }
2420
2421 /*
2422 * See if the mapping(s) for this btree block are valid, i.e.
2423 * don't contain holes, are logically contiguous, and cover the whole range.
2424 */
2425 STATIC int
2426 xfs_da_map_covers_blocks(
2427 int nmap,
2428 xfs_bmbt_irec_t *mapp,
2429 xfs_dablk_t bno,
2430 int count)
2431 {
2432 int i;
2433 xfs_fileoff_t off;
2434
2435 for (i = 0, off = bno; i < nmap; i++) {
2436 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2437 mapp[i].br_startblock == DELAYSTARTBLOCK) {
2438 return 0;
2439 }
2440 if (off != mapp[i].br_startoff) {
2441 return 0;
2442 }
2443 off += mapp[i].br_blockcount;
2444 }
2445 return off == bno + count;
2446 }
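
/*
 * Example: for bno == 0 and count == 4, mappings covering offsets
 * [0,2) and [2,4) with real start blocks pass the check; a mapping
 * starting at offset 1, or one whose br_startblock is HOLESTARTBLOCK
 * or DELAYSTARTBLOCK, fails it.
 */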
2447
2448 /*
2449 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
2450 *
2451 * For the single map case, it is assumed that the caller has provided a pointer
2452 * to a valid xfs_buf_map. For the multiple map case, this function will
2453 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2454 * map pointer with the allocated map.
2455 */
2456 static int
2457 xfs_buf_map_from_irec(
2458 struct xfs_mount *mp,
2459 struct xfs_buf_map **mapp,
2460 int *nmaps,
2461 struct xfs_bmbt_irec *irecs,
2462 int nirecs)
2463 {
2464 struct xfs_buf_map *map;
2465 int i;
2466
2467 ASSERT(*nmaps == 1);
2468 ASSERT(nirecs >= 1);
2469
2470 if (nirecs > 1) {
2471 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2472 KM_SLEEP | KM_NOFS);
2473 if (!map)
2474 return ENOMEM;
2475 *mapp = map;
2476 }
2477
2478 *nmaps = nirecs;
2479 map = *mapp;
2480 for (i = 0; i < *nmaps; i++) {
2481 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2482 irecs[i].br_startblock != HOLESTARTBLOCK);
2483 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2484 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2485 }
2486 return 0;
2487 }
2488
2489 /*
2490 * Map the block we are given, ready for reading. There are three possible
2491 * return values:
2492 *	-1 - if we land in a hole and mappedbno == -2, so the caller knows
2493 *	     not to issue a subsequent read.
2494 *	0 - if we mapped the block successfully.
2495 *	>0 - a positive error number if there was an error.
2496 */
2497 static int
2498 xfs_dabuf_map(
2499 struct xfs_trans *trans,
2500 struct xfs_inode *dp,
2501 xfs_dablk_t bno,
2502 xfs_daddr_t mappedbno,
2503 int whichfork,
2504 struct xfs_buf_map **map,
2505 int *nmaps)
2506 {
2507 struct xfs_mount *mp = dp->i_mount;
2508 int nfsb;
2509 int error = 0;
2510 struct xfs_bmbt_irec irec;
2511 struct xfs_bmbt_irec *irecs = &irec;
2512 int nirecs;
2513
2514 ASSERT(map && *map);
2515 ASSERT(*nmaps == 1);
2516
2517 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
2518
2519 /*
2520 * Caller doesn't have a mapping. -2 means don't complain
2521 * if we land in a hole.
2522 */
2523 if (mappedbno == -1 || mappedbno == -2) {
2524 /*
2525 * Optimize the one-block case.
2526 */
2527 if (nfsb != 1)
2528 irecs = kmem_zalloc(sizeof(irec) * nfsb,
2529 KM_SLEEP | KM_NOFS);
2530
2531 nirecs = nfsb;
2532 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2533 &nirecs, xfs_bmapi_aflag(whichfork));
2534 if (error)
2535 goto out;
2536 } else {
2537 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2538 irecs->br_startoff = (xfs_fileoff_t)bno;
2539 irecs->br_blockcount = nfsb;
2540 irecs->br_state = 0;
2541 nirecs = 1;
2542 }
2543
2544 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2545 error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
2546 if (unlikely(error == EFSCORRUPTED)) {
2547 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2548 int i;
2549 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2550 __func__, (long long)bno,
2551 (long long)dp->i_ino);
2552 for (i = 0; i < *nmaps; i++) {
2553 xfs_alert(mp,
2554 "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2555 i,
2556 (long long)irecs[i].br_startoff,
2557 (long long)irecs[i].br_startblock,
2558 (long long)irecs[i].br_blockcount,
2559 irecs[i].br_state);
2560 }
2561 }
2562 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2563 XFS_ERRLEVEL_LOW, mp);
2564 }
2565 goto out;
2566 }
2567 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2568 out:
2569 if (irecs != &irec)
2570 kmem_free(irecs);
2571 return error;
2572 }
2573
2574 /*
2575 * Get a buffer for the dir/attr block.
2576 */
2577 int
2578 xfs_da_get_buf(
2579 struct xfs_trans *trans,
2580 struct xfs_inode *dp,
2581 xfs_dablk_t bno,
2582 xfs_daddr_t mappedbno,
2583 struct xfs_buf **bpp,
2584 int whichfork)
2585 {
2586 struct xfs_buf *bp;
2587 struct xfs_buf_map map;
2588 struct xfs_buf_map *mapp;
2589 int nmap;
2590 int error;
2591
2592 *bpp = NULL;
2593 mapp = &map;
2594 nmap = 1;
2595 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2596 &mapp, &nmap);
2597 if (error) {
2598 /* mapping a hole is not an error, but we don't continue */
2599 if (error == -1)
2600 error = 0;
2601 goto out_free;
2602 }
2603
2604 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2605 mapp, nmap, 0);
2606 error = bp ? bp->b_error : XFS_ERROR(EIO);
2607 if (error) {
2608 if (bp)
2609 xfs_trans_brelse(trans, bp);
2610 goto out_free;
2611 }
2611
2612 *bpp = bp;
2613
2614 out_free:
2615 if (mapp != &map)
2616 kmem_free(mapp);
2617
2618 return error;
2619 }
2620
2621 /*
2622 * Get a buffer for the dir/attr block, fill in the contents.
2623 */
2624 int
2625 xfs_da_read_buf(
2626 struct xfs_trans *trans,
2627 struct xfs_inode *dp,
2628 xfs_dablk_t bno,
2629 xfs_daddr_t mappedbno,
2630 struct xfs_buf **bpp,
2631 int whichfork,
2632 const struct xfs_buf_ops *ops)
2633 {
2634 struct xfs_buf *bp;
2635 struct xfs_buf_map map;
2636 struct xfs_buf_map *mapp;
2637 int nmap;
2638 int error;
2639
2640 *bpp = NULL;
2641 mapp = &map;
2642 nmap = 1;
2643 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2644 &mapp, &nmap);
2645 if (error) {
2646 /* mapping a hole is not an error, but we don't continue */
2647 if (error == -1)
2648 error = 0;
2649 goto out_free;
2650 }
2651
2652 error = xfs_trans_read_buf_map(dp->i_mount, trans,
2653 dp->i_mount->m_ddev_targp,
2654 mapp, nmap, 0, &bp, ops);
2655 if (error)
2656 goto out_free;
2657
2658 if (whichfork == XFS_ATTR_FORK)
2659 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2660 else
2661 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2662
2663 /*
2664 * This verification code will be moved to a CRC verification callback
2665 * function so just leave it here unchanged until then.
2666 */
2667 {
2668 xfs_dir2_data_hdr_t *hdr = bp->b_addr;
2669 xfs_dir2_free_t *free = bp->b_addr;
2670 xfs_da_blkinfo_t *info = bp->b_addr;
2671 uint magic, magic1;
2672 struct xfs_mount *mp = dp->i_mount;
2673
2674 magic = be16_to_cpu(info->magic);
2675 magic1 = be32_to_cpu(hdr->magic);
2676 if (unlikely(
2677 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2678 (magic != XFS_DA3_NODE_MAGIC) &&
2679 (magic != XFS_ATTR_LEAF_MAGIC) &&
2680 (magic != XFS_ATTR3_LEAF_MAGIC) &&
2681 (magic != XFS_DIR2_LEAF1_MAGIC) &&
2682 (magic != XFS_DIR3_LEAF1_MAGIC) &&
2683 (magic != XFS_DIR2_LEAFN_MAGIC) &&
2684 (magic != XFS_DIR3_LEAFN_MAGIC) &&
2685 (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2686 (magic1 != XFS_DIR3_BLOCK_MAGIC) &&
2687 (magic1 != XFS_DIR2_DATA_MAGIC) &&
2688 (magic1 != XFS_DIR3_DATA_MAGIC) &&
2689 (free->hdr.magic !=
2690 cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
2691 (free->hdr.magic !=
2692 cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
2693 mp, XFS_ERRTAG_DA_READ_BUF,
2694 XFS_RANDOM_DA_READ_BUF))) {
2695 trace_xfs_da_btree_corrupt(bp, _RET_IP_);
2696 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2697 XFS_ERRLEVEL_LOW, mp, info);
2698 error = XFS_ERROR(EFSCORRUPTED);
2699 xfs_trans_brelse(trans, bp);
2700 goto out_free;
2701 }
2702 }
2703 *bpp = bp;
2704 out_free:
2705 if (mapp != &map)
2706 kmem_free(mapp);
2707
2708 return error;
2709 }
2710
2711 /*
2712 * Readahead the dir/attr block.
2713 */
2714 xfs_daddr_t
2715 xfs_da_reada_buf(
2716 struct xfs_trans *trans,
2717 struct xfs_inode *dp,
2718 xfs_dablk_t bno,
2719 xfs_daddr_t mappedbno,
2720 int whichfork,
2721 const struct xfs_buf_ops *ops)
2722 {
2723 struct xfs_buf_map map;
2724 struct xfs_buf_map *mapp;
2725 int nmap;
2726 int error;
2727
2728 mapp = &map;
2729 nmap = 1;
2730 error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
2731 &mapp, &nmap);
2732 if (error) {
2733 /* mapping a hole is not an error, but we don't continue */
2734 if (error == -1)
2735 error = 0;
2736 goto out_free;
2737 }
2738
2739 mappedbno = mapp[0].bm_bn;
2740 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2741
2742 out_free:
2743 if (mapp != &map)
2744 kmem_free(mapp);
2745
2746 if (error)
2747 return -1;
2748 return mappedbno;
2749 }