Drop locks in btrfs_search_slot when reading a tree block.
fs/btrfs/ctree.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "print-tree.h"
24 #include "locking.h"
25
26 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
27 *root, struct btrfs_path *path, int level);
28 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_key *ins_key,
30 struct btrfs_path *path, int data_size, int extend);
31 static int push_node_left(struct btrfs_trans_handle *trans,
32 struct btrfs_root *root, struct extent_buffer *dst,
33 struct extent_buffer *src, int empty);
34 static int balance_node_right(struct btrfs_trans_handle *trans,
35 struct btrfs_root *root,
36 struct extent_buffer *dst_buf,
37 struct extent_buffer *src_buf);
38 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
39 struct btrfs_path *path, int level, int slot);
40
41 inline void btrfs_init_path(struct btrfs_path *p)
42 {
43 memset(p, 0, sizeof(*p));
44 }
45
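/*
 * allocate a path from the slab cache.  readahead is turned on by
 * default; callers that don't want it can clear path->reada.
 */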
46 struct btrfs_path *btrfs_alloc_path(void)
47 {
48 struct btrfs_path *path;
49 path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
50 if (path) {
51 btrfs_init_path(path);
52 path->reada = 1;
53 }
54 return path;
55 }
56
57 void btrfs_free_path(struct btrfs_path *p)
58 {
59 btrfs_release_path(NULL, p);
60 kmem_cache_free(btrfs_path_cachep, p);
61 }
62
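/*
 * drop every lock and extent_buffer reference held by the path and zero
 * it out so it can be reused.  The keep_locks setting survives the reset.
 */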
63 void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
64 {
65 int i;
66 int keep = p->keep_locks;
67
68 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
69 if (!p->nodes[i])
70 continue;
71 if (p->locks[i]) {
72 btrfs_tree_unlock(p->nodes[i]);
73 p->locks[i] = 0;
74 }
75 free_extent_buffer(p->nodes[i]);
76 }
77 memset(p, 0, sizeof(*p));
78 p->keep_locks = keep;
79 }
80
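/*
 * return the current root node of a tree with an extra reference held.
 * node_lock guards against the root being swapped out from under us
 * while the reference is taken.
 */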
81 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
82 {
83 struct extent_buffer *eb;
84 spin_lock(&root->node_lock);
85 eb = root->node;
86 extent_buffer_get(eb);
87 spin_unlock(&root->node_lock);
88 return eb;
89 }
90
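/*
 * loop until we have the root node locked.  The root can change while we
 * block on the lock (another writer may COW it), so after locking we
 * recheck that the buffer is still root->node and retry if it isn't.
 * Returns a locked root node with a reference held.
 */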
91 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
92 {
93 struct extent_buffer *eb;
94
95 while (1) {
96 eb = btrfs_root_node(root);
97 btrfs_tree_lock(eb);
98
99 spin_lock(&root->node_lock);
100 if (eb == root->node) {
101 spin_unlock(&root->node_lock);
102 break;
103 }
104 spin_unlock(&root->node_lock);
105
106 btrfs_tree_unlock(eb);
107 free_extent_buffer(eb);
108 }
109 return eb;
110 }
111
112 static void add_root_to_dirty_list(struct btrfs_root *root)
113 {
114 if (root->track_dirty && list_empty(&root->dirty_list)) {
115 list_add(&root->dirty_list,
116 &root->fs_info->dirty_cowonly_roots);
117 }
118 }
119
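/*
 * make a copy of the buffer that backs a root, under a new root objectid.
 * The contents are copied into a freshly allocated block, the header is
 * stamped with the new owner and generation, and btrfs_inc_ref takes
 * references on everything the copied block points to.
 */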
120 int btrfs_copy_root(struct btrfs_trans_handle *trans,
121 struct btrfs_root *root,
122 struct extent_buffer *buf,
123 struct extent_buffer **cow_ret, u64 new_root_objectid)
124 {
125 struct extent_buffer *cow;
126 u32 nritems;
127 int ret = 0;
128 int level;
129 struct btrfs_key first_key;
130 struct btrfs_root *new_root;
131
132 new_root = kmalloc(sizeof(*new_root), GFP_NOFS);
133 if (!new_root)
134 return -ENOMEM;
135
136 memcpy(new_root, root, sizeof(*new_root));
137 new_root->root_key.objectid = new_root_objectid;
138
139 WARN_ON(root->ref_cows && trans->transid !=
140 root->fs_info->running_transaction->transid);
141 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
142
143 level = btrfs_header_level(buf);
144 nritems = btrfs_header_nritems(buf);
145 if (nritems) {
146 if (level == 0)
147 btrfs_item_key_to_cpu(buf, &first_key, 0);
148 else
149 btrfs_node_key_to_cpu(buf, &first_key, 0);
150 } else {
151 first_key.objectid = 0;
152 }
153 cow = btrfs_alloc_free_block(trans, new_root, buf->len,
154 new_root_objectid,
155 trans->transid, first_key.objectid,
156 level, buf->start, 0);
157 if (IS_ERR(cow)) {
158 kfree(new_root);
159 return PTR_ERR(cow);
160 }
161
162 copy_extent_buffer(cow, buf, 0, 0, cow->len);
163 btrfs_set_header_bytenr(cow, cow->start);
164 btrfs_set_header_generation(cow, trans->transid);
165 btrfs_set_header_owner(cow, new_root_objectid);
166 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
167
168 WARN_ON(btrfs_header_generation(buf) > trans->transid);
169 ret = btrfs_inc_ref(trans, new_root, buf);
170 kfree(new_root);
171
172 if (ret)
173 return ret;
174
175 btrfs_mark_buffer_dirty(cow);
176 *cow_ret = cow;
177 return 0;
178 }
179
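/*
 * do the real work of copy-on-writing a tree block: allocate a new block
 * near search_start, copy buf into it, and switch either the root pointer
 * or the slot in the parent over to the copy.  The old extent is freed
 * (the commit root is left alone) and *cow_ret is set to the new, dirty
 * buffer.
 */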
180 int __btrfs_cow_block(struct btrfs_trans_handle *trans,
181 struct btrfs_root *root,
182 struct extent_buffer *buf,
183 struct extent_buffer *parent, int parent_slot,
184 struct extent_buffer **cow_ret,
185 u64 search_start, u64 empty_size)
186 {
187 u64 root_gen;
188 struct extent_buffer *cow;
189 u32 nritems;
190 int ret = 0;
191 int different_trans = 0;
192 int level;
193 int unlock_orig = 0;
194 struct btrfs_key first_key;
195
196 if (*cow_ret == buf)
197 unlock_orig = 1;
198
199 WARN_ON(!btrfs_tree_locked(buf));
200
201 if (root->ref_cows) {
202 root_gen = trans->transid;
203 } else {
204 root_gen = 0;
205 }
206 WARN_ON(root->ref_cows && trans->transid !=
207 root->fs_info->running_transaction->transid);
208 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
209
210 level = btrfs_header_level(buf);
211 nritems = btrfs_header_nritems(buf);
212 if (nritems) {
213 if (level == 0)
214 btrfs_item_key_to_cpu(buf, &first_key, 0);
215 else
216 btrfs_node_key_to_cpu(buf, &first_key, 0);
217 } else {
218 first_key.objectid = 0;
219 }
220 cow = btrfs_alloc_free_block(trans, root, buf->len,
221 root->root_key.objectid,
222 root_gen, first_key.objectid, level,
223 search_start, empty_size);
224 if (IS_ERR(cow))
225 return PTR_ERR(cow);
226
227 copy_extent_buffer(cow, buf, 0, 0, cow->len);
228 btrfs_set_header_bytenr(cow, cow->start);
229 btrfs_set_header_generation(cow, trans->transid);
230 btrfs_set_header_owner(cow, root->root_key.objectid);
231 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
232
233 WARN_ON(btrfs_header_generation(buf) > trans->transid);
234 if (btrfs_header_generation(buf) != trans->transid) {
235 different_trans = 1;
236 ret = btrfs_inc_ref(trans, root, buf);
237 if (ret)
238 return ret;
239 } else {
240 clean_tree_block(trans, root, buf);
241 }
242
243 if (buf == root->node) {
244 WARN_ON(parent && parent != buf);
245 root_gen = btrfs_header_generation(buf);
246
247 spin_lock(&root->node_lock);
248 root->node = cow;
249 extent_buffer_get(cow);
250 spin_unlock(&root->node_lock);
251
252 if (buf != root->commit_root) {
253 btrfs_free_extent(trans, root, buf->start,
254 buf->len, root->root_key.objectid,
255 root_gen, 0, 0, 1);
256 }
257 free_extent_buffer(buf);
258 add_root_to_dirty_list(root);
259 } else {
260 root_gen = btrfs_header_generation(parent);
261 btrfs_set_node_blockptr(parent, parent_slot,
262 cow->start);
263 WARN_ON(trans->transid == 0);
264 btrfs_set_node_ptr_generation(parent, parent_slot,
265 trans->transid);
266 btrfs_mark_buffer_dirty(parent);
267 WARN_ON(btrfs_header_generation(parent) != trans->transid);
268 btrfs_free_extent(trans, root, buf->start, buf->len,
269 btrfs_header_owner(parent), root_gen,
270 0, 0, 1);
271 }
272 if (unlock_orig)
273 btrfs_tree_unlock(buf);
274 free_extent_buffer(buf);
275 btrfs_mark_buffer_dirty(cow);
276 *cow_ret = cow;
277 return 0;
278 }
279
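/*
 * cow_block front end: if buf was already COWed in the running
 * transaction and hasn't been written out, it can be used as-is and
 * *cow_ret is simply set to buf.  Otherwise __btrfs_cow_block does the
 * copy, with a search hint at the 1GB boundary below the old block so
 * the new block lands nearby.
 */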
280 int btrfs_cow_block(struct btrfs_trans_handle *trans,
281 struct btrfs_root *root, struct extent_buffer *buf,
282 struct extent_buffer *parent, int parent_slot,
283 struct extent_buffer **cow_ret)
284 {
285 u64 search_start;
286 u64 header_trans;
287 int ret;
288
289 if (trans->transaction != root->fs_info->running_transaction) {
290 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
291 root->fs_info->running_transaction->transid);
292 WARN_ON(1);
293 }
294 if (trans->transid != root->fs_info->generation) {
295 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
296 root->fs_info->generation);
297 WARN_ON(1);
298 }
299
300 header_trans = btrfs_header_generation(buf);
301 spin_lock(&root->fs_info->hash_lock);
302 if (header_trans == trans->transid &&
303 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
304 *cow_ret = buf;
305 spin_unlock(&root->fs_info->hash_lock);
306 return 0;
307 }
308 spin_unlock(&root->fs_info->hash_lock);
309 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
310 ret = __btrfs_cow_block(trans, root, buf, parent,
311 parent_slot, cow_ret, search_start, 0);
312 return ret;
313 }
314
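/*
 * return 1 if the gap between the two blocks is under 32k.
 * btrfs_realloc_node uses this to skip blocks that already sit close
 * to their neighbors on disk.
 */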
315 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
316 {
317 if (blocknr < other && other - (blocknr + blocksize) < 32768)
318 return 1;
319 if (blocknr > other && blocknr - (other + blocksize) < 32768)
320 return 1;
321 return 0;
322 }
323
324 /*
325 * compare two keys in a memcmp fashion
326 */
327 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
328 {
329 struct btrfs_key k1;
330
331 btrfs_disk_key_to_cpu(&k1, disk);
332
333 if (k1.objectid > k2->objectid)
334 return 1;
335 if (k1.objectid < k2->objectid)
336 return -1;
337 if (k1.type > k2->type)
338 return 1;
339 if (k1.type < k2->type)
340 return -1;
341 if (k1.offset > k2->offset)
342 return 1;
343 if (k1.offset < k2->offset)
344 return -1;
345 return 0;
346 }
347
348
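/*
 * defragment a node by re-COWing any child blocks past 'progress' that
 * are not already close to their neighbors, so they end up allocated
 * near each other.  Note the early return just below: this path is
 * currently disabled until it gets proper locking.
 */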
349 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
350 struct btrfs_root *root, struct extent_buffer *parent,
351 int start_slot, int cache_only, u64 *last_ret,
352 struct btrfs_key *progress)
353 {
354 struct extent_buffer *cur;
355 struct extent_buffer *tmp;
356 u64 blocknr;
357 u64 gen;
358 u64 search_start = *last_ret;
359 u64 last_block = 0;
360 u64 other;
361 u32 parent_nritems;
362 int end_slot;
363 int i;
364 int err = 0;
365 int parent_level;
366 int uptodate;
367 u32 blocksize;
368 int progress_passed = 0;
369 struct btrfs_disk_key disk_key;
370
371 /* FIXME this code needs locking */
372 return 0;
373
374 parent_level = btrfs_header_level(parent);
375 if (cache_only && parent_level != 1)
376 return 0;
377
378 if (trans->transaction != root->fs_info->running_transaction) {
379 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
380 root->fs_info->running_transaction->transid);
381 WARN_ON(1);
382 }
383 if (trans->transid != root->fs_info->generation) {
384 printk(KERN_CRIT "trans %Lu running %Lu\n", trans->transid,
385 root->fs_info->generation);
386 WARN_ON(1);
387 }
388
389 parent_nritems = btrfs_header_nritems(parent);
390 blocksize = btrfs_level_size(root, parent_level - 1);
391 end_slot = parent_nritems;
392
393 if (parent_nritems == 1)
394 return 0;
395
396 for (i = start_slot; i < end_slot; i++) {
397 int close = 1;
398
399 if (!parent->map_token) {
400 map_extent_buffer(parent,
401 btrfs_node_key_ptr_offset(i),
402 sizeof(struct btrfs_key_ptr),
403 &parent->map_token, &parent->kaddr,
404 &parent->map_start, &parent->map_len,
405 KM_USER1);
406 }
407 btrfs_node_key(parent, &disk_key, i);
408 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
409 continue;
410
411 progress_passed = 1;
412 blocknr = btrfs_node_blockptr(parent, i);
413 gen = btrfs_node_ptr_generation(parent, i);
414 if (last_block == 0)
415 last_block = blocknr;
416
417 if (i > 0) {
418 other = btrfs_node_blockptr(parent, i - 1);
419 close = close_blocks(blocknr, other, blocksize);
420 }
421 if (!close && i < end_slot - 2) {
422 other = btrfs_node_blockptr(parent, i + 1);
423 close = close_blocks(blocknr, other, blocksize);
424 }
425 if (close) {
426 last_block = blocknr;
427 continue;
428 }
429 if (parent->map_token) {
430 unmap_extent_buffer(parent, parent->map_token,
431 KM_USER1);
432 parent->map_token = NULL;
433 }
434
435 cur = btrfs_find_tree_block(root, blocknr, blocksize);
436 if (cur)
437 uptodate = btrfs_buffer_uptodate(cur, gen);
438 else
439 uptodate = 0;
440 if (!cur || !uptodate) {
441 if (cache_only) {
442 free_extent_buffer(cur);
443 continue;
444 }
445 if (!cur) {
446 cur = read_tree_block(root, blocknr,
447 blocksize, gen);
448 } else if (!uptodate) {
449 btrfs_read_buffer(cur, gen);
450 }
451 }
452 if (search_start == 0)
453 search_start = last_block;
454
455 err = __btrfs_cow_block(trans, root, cur, parent, i,
456 &tmp, search_start,
457 min(16 * blocksize,
458 (end_slot - i) * blocksize));
459 if (err) {
460 free_extent_buffer(cur);
461 break;
462 }
463 search_start = tmp->start;
464 last_block = tmp->start;
465 *last_ret = search_start;
466 if (parent_level == 1)
467 btrfs_clear_buffer_defrag(tmp);
468 free_extent_buffer(tmp);
469 }
470 if (parent->map_token) {
471 unmap_extent_buffer(parent, parent->map_token,
472 KM_USER1);
473 parent->map_token = NULL;
474 }
475 return err;
476 }
477
478 /*
479 * The leaf data grows from end-to-front in the node.
480 * this returns the address of the start of the last item,
481 * which is the stop of the leaf data stack
482 */
483 static inline unsigned int leaf_data_end(struct btrfs_root *root,
484 struct extent_buffer *leaf)
485 {
486 u32 nr = btrfs_header_nritems(leaf);
487 if (nr == 0)
488 return BTRFS_LEAF_DATA_SIZE(root);
489 return btrfs_item_offset_nr(leaf, nr - 1);
490 }
491
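/*
 * sanity checks on a node block: the first key must match the key in the
 * parent slot that points to it, the parent's blockptr must match the
 * node's bytenr, and the key at path->slots[level] must sort correctly
 * against its neighbors.
 */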
492 static int check_node(struct btrfs_root *root, struct btrfs_path *path,
493 int level)
494 {
495 struct extent_buffer *parent = NULL;
496 struct extent_buffer *node = path->nodes[level];
497 struct btrfs_disk_key parent_key;
498 struct btrfs_disk_key node_key;
499 int parent_slot;
500 int slot;
501 struct btrfs_key cpukey;
502 u32 nritems = btrfs_header_nritems(node);
503
504 if (path->nodes[level + 1])
505 parent = path->nodes[level + 1];
506
507 slot = path->slots[level];
508 BUG_ON(nritems == 0);
509 if (parent) {
510 parent_slot = path->slots[level + 1];
511 btrfs_node_key(parent, &parent_key, parent_slot);
512 btrfs_node_key(node, &node_key, 0);
513 BUG_ON(memcmp(&parent_key, &node_key,
514 sizeof(struct btrfs_disk_key)));
515 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
516 btrfs_header_bytenr(node));
517 }
518 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
519 if (slot != 0) {
520 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
521 btrfs_node_key(node, &node_key, slot);
522 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
523 }
524 if (slot < nritems - 1) {
525 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
526 btrfs_node_key(node, &node_key, slot);
527 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
528 }
529 return 0;
530 }
531
532 static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
533 int level)
534 {
535 struct extent_buffer *leaf = path->nodes[level];
536 struct extent_buffer *parent = NULL;
537 int parent_slot;
538 struct btrfs_key cpukey;
539 struct btrfs_disk_key parent_key;
540 struct btrfs_disk_key leaf_key;
541 int slot = path->slots[0];
542
543 u32 nritems = btrfs_header_nritems(leaf);
544
545 if (path->nodes[level + 1])
546 parent = path->nodes[level + 1];
547
548 if (nritems == 0)
549 return 0;
550
551 if (parent) {
552 parent_slot = path->slots[level + 1];
553 btrfs_node_key(parent, &parent_key, parent_slot);
554 btrfs_item_key(leaf, &leaf_key, 0);
555
556 BUG_ON(memcmp(&parent_key, &leaf_key,
557 sizeof(struct btrfs_disk_key)));
558 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
559 btrfs_header_bytenr(leaf));
560 }
561 #if 0
562 for (i = 0; nritems > 1 && i < nritems - 2; i++) {
563 btrfs_item_key_to_cpu(leaf, &cpukey, i + 1);
564 btrfs_item_key(leaf, &leaf_key, i);
565 if (comp_keys(&leaf_key, &cpukey) >= 0) {
566 btrfs_print_leaf(root, leaf);
567 printk("slot %d offset bad key\n", i);
568 BUG_ON(1);
569 }
570 if (btrfs_item_offset_nr(leaf, i) !=
571 btrfs_item_end_nr(leaf, i + 1)) {
572 btrfs_print_leaf(root, leaf);
573 printk("slot %d offset bad\n", i);
574 BUG_ON(1);
575 }
576 if (i == 0) {
577 if (btrfs_item_offset_nr(leaf, i) +
578 btrfs_item_size_nr(leaf, i) !=
579 BTRFS_LEAF_DATA_SIZE(root)) {
580 btrfs_print_leaf(root, leaf);
581 printk("slot %d first offset bad\n", i);
582 BUG_ON(1);
583 }
584 }
585 }
586 if (nritems > 0) {
587 if (btrfs_item_size_nr(leaf, nritems - 1) > 4096) {
588 btrfs_print_leaf(root, leaf);
589 printk("slot %d bad size \n", nritems - 1);
590 BUG_ON(1);
591 }
592 }
593 #endif
594 if (slot != 0 && slot < nritems - 1) {
595 btrfs_item_key(leaf, &leaf_key, slot);
596 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
597 if (comp_keys(&leaf_key, &cpukey) <= 0) {
598 btrfs_print_leaf(root, leaf);
599 printk("slot %d offset bad key\n", slot);
600 BUG_ON(1);
601 }
602 if (btrfs_item_offset_nr(leaf, slot - 1) !=
603 btrfs_item_end_nr(leaf, slot)) {
604 btrfs_print_leaf(root, leaf);
605 printk("slot %d offset bad\n", slot);
606 BUG_ON(1);
607 }
608 }
609 if (slot < nritems - 1) {
610 btrfs_item_key(leaf, &leaf_key, slot);
611 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
612 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
613 if (btrfs_item_offset_nr(leaf, slot) !=
614 btrfs_item_end_nr(leaf, slot + 1)) {
615 btrfs_print_leaf(root, leaf);
616 printk("slot %d offset bad\n", slot);
617 BUG_ON(1);
618 }
619 }
620 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
621 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
622 return 0;
623 }
624
625 static int noinline check_block(struct btrfs_root *root,
626 struct btrfs_path *path, int level)
627 {
628 u64 found_start;
629 return 0;
630 if (btrfs_header_level(path->nodes[level]) != level)
631 printk("warning: bad level %Lu wanted %d found %d\n",
632 path->nodes[level]->start, level,
633 btrfs_header_level(path->nodes[level]));
634 found_start = btrfs_header_bytenr(path->nodes[level]);
635 if (found_start != path->nodes[level]->start) {
636 printk("warning: bad bytentr %Lu found %Lu\n",
637 path->nodes[level]->start, found_start);
638 }
639 #if 0
640 struct extent_buffer *buf = path->nodes[level];
641
642 if (memcmp_extent_buffer(buf, root->fs_info->fsid,
643 (unsigned long)btrfs_header_fsid(buf),
644 BTRFS_FSID_SIZE)) {
645 printk("warning bad block %Lu\n", buf->start);
646 return 1;
647 }
648 #endif
649 if (level == 0)
650 return check_leaf(root, path, level);
651 return check_node(root, path, level);
652 }
653
654 /*
655 * search for key in the extent_buffer. The items start at offset p,
656 * and they are item_size apart. There are 'max' items in p.
657 *
658 * the slot in the array is returned via slot, and it points to
659 * the place where you would insert key if it is not found in
660 * the array.
661 *
662 * slot may point to max if the key is bigger than all of the keys
663 */
664 static int generic_bin_search(struct extent_buffer *eb, unsigned long p,
665 int item_size, struct btrfs_key *key,
666 int max, int *slot)
667 {
668 int low = 0;
669 int high = max;
670 int mid;
671 int ret;
672 struct btrfs_disk_key *tmp = NULL;
673 struct btrfs_disk_key unaligned;
674 unsigned long offset;
675 char *map_token = NULL;
676 char *kaddr = NULL;
677 unsigned long map_start = 0;
678 unsigned long map_len = 0;
679 int err;
680
681 while (low < high) {
682 mid = (low + high) / 2;
683 offset = p + mid * item_size;
684
685 if (!map_token || offset < map_start ||
686 (offset + sizeof(struct btrfs_disk_key)) >
687 map_start + map_len) {
688 if (map_token) {
689 unmap_extent_buffer(eb, map_token, KM_USER0);
690 map_token = NULL;
691 }
692 err = map_extent_buffer(eb, offset,
693 sizeof(struct btrfs_disk_key),
694 &map_token, &kaddr,
695 &map_start, &map_len, KM_USER0);
696
697 if (!err) {
698 tmp = (struct btrfs_disk_key *)(kaddr + offset -
699 map_start);
700 } else {
701 read_extent_buffer(eb, &unaligned,
702 offset, sizeof(unaligned));
703 tmp = &unaligned;
704 }
705
706 } else {
707 tmp = (struct btrfs_disk_key *)(kaddr + offset -
708 map_start);
709 }
710 ret = comp_keys(tmp, key);
711
712 if (ret < 0)
713 low = mid + 1;
714 else if (ret > 0)
715 high = mid;
716 else {
717 *slot = mid;
718 if (map_token)
719 unmap_extent_buffer(eb, map_token, KM_USER0);
720 return 0;
721 }
722 }
723 *slot = low;
724 if (map_token)
725 unmap_extent_buffer(eb, map_token, KM_USER0);
726 return 1;
727 }
728
729 /*
730 * simple bin_search frontend that does the right thing for
731 * leaves vs nodes
732 */
733 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
734 int level, int *slot)
735 {
736 if (level == 0) {
737 return generic_bin_search(eb,
738 offsetof(struct btrfs_leaf, items),
739 sizeof(struct btrfs_item),
740 key, btrfs_header_nritems(eb),
741 slot);
742 } else {
743 return generic_bin_search(eb,
744 offsetof(struct btrfs_node, ptrs),
745 sizeof(struct btrfs_key_ptr),
746 key, btrfs_header_nritems(eb),
747 slot);
748 }
749 return -1;
750 }
751
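/*
 * read the child block referenced by a given slot in 'parent'.  Out of
 * range slots return NULL; otherwise the block is read (or pulled from
 * cache) with the generation recorded in the parent pointer.
 */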
752 static struct extent_buffer *read_node_slot(struct btrfs_root *root,
753 struct extent_buffer *parent, int slot)
754 {
755 int level = btrfs_header_level(parent);
756 if (slot < 0)
757 return NULL;
758 if (slot >= btrfs_header_nritems(parent))
759 return NULL;
760
761 BUG_ON(level == 0);
762
763 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
764 btrfs_level_size(root, level - 1),
765 btrfs_node_ptr_generation(parent, slot));
766 }
767
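/*
 * node balancing, used while deleting items.  It tries to push pointers
 * into the left and right siblings of path->nodes[level], collapses the
 * root when it is down to a single pointer, and drops the middle node
 * entirely if it ends up empty.  The path and its locks are fixed up so
 * the caller still points at the original slot.
 */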
768 static int balance_level(struct btrfs_trans_handle *trans,
769 struct btrfs_root *root,
770 struct btrfs_path *path, int level)
771 {
772 struct extent_buffer *right = NULL;
773 struct extent_buffer *mid;
774 struct extent_buffer *left = NULL;
775 struct extent_buffer *parent = NULL;
776 int ret = 0;
777 int wret;
778 int pslot;
779 int orig_slot = path->slots[level];
780 int err_on_enospc = 0;
781 u64 orig_ptr;
782
783 if (level == 0)
784 return 0;
785
786 mid = path->nodes[level];
787 WARN_ON(!path->locks[level]);
788 WARN_ON(btrfs_header_generation(mid) != trans->transid);
789
790 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
791
792 if (level < BTRFS_MAX_LEVEL - 1)
793 parent = path->nodes[level + 1];
794 pslot = path->slots[level + 1];
795
796 /*
797 * deal with the case where there is only one pointer in the root
798 * by promoting the node below to a root
799 */
800 if (!parent) {
801 struct extent_buffer *child;
802
803 if (btrfs_header_nritems(mid) != 1)
804 return 0;
805
806 /* promote the child to a root */
807 child = read_node_slot(root, mid, 0);
808 BUG_ON(!child);
809 btrfs_tree_lock(child);
810 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
811 BUG_ON(ret);
812
813 spin_lock(&root->node_lock);
814 root->node = child;
815 spin_unlock(&root->node_lock);
816
817 add_root_to_dirty_list(root);
818 btrfs_tree_unlock(child);
819 path->locks[level] = 0;
820 path->nodes[level] = NULL;
821 clean_tree_block(trans, root, mid);
822 btrfs_tree_unlock(mid);
823 /* once for the path */
824 free_extent_buffer(mid);
825 ret = btrfs_free_extent(trans, root, mid->start, mid->len,
826 root->root_key.objectid,
827 btrfs_header_generation(mid), 0, 0, 1);
828 /* once for the root ptr */
829 free_extent_buffer(mid);
830 return ret;
831 }
832 if (btrfs_header_nritems(mid) >
833 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
834 return 0;
835
836 if (btrfs_header_nritems(mid) < 2)
837 err_on_enospc = 1;
838
839 left = read_node_slot(root, parent, pslot - 1);
840 if (left) {
841 btrfs_tree_lock(left);
842 wret = btrfs_cow_block(trans, root, left,
843 parent, pslot - 1, &left);
844 if (wret) {
845 ret = wret;
846 goto enospc;
847 }
848 }
849 right = read_node_slot(root, parent, pslot + 1);
850 if (right) {
851 btrfs_tree_lock(right);
852 wret = btrfs_cow_block(trans, root, right,
853 parent, pslot + 1, &right);
854 if (wret) {
855 ret = wret;
856 goto enospc;
857 }
858 }
859
860 /* first, try to make some room in the middle buffer */
861 if (left) {
862 orig_slot += btrfs_header_nritems(left);
863 wret = push_node_left(trans, root, left, mid, 1);
864 if (wret < 0)
865 ret = wret;
866 if (btrfs_header_nritems(mid) < 2)
867 err_on_enospc = 1;
868 }
869
870 /*
871 * then try to empty the right most buffer into the middle
872 */
873 if (right) {
874 wret = push_node_left(trans, root, mid, right, 1);
875 if (wret < 0 && wret != -ENOSPC)
876 ret = wret;
877 if (btrfs_header_nritems(right) == 0) {
878 u64 bytenr = right->start;
879 u64 generation = btrfs_header_generation(parent);
880 u32 blocksize = right->len;
881
882 clean_tree_block(trans, root, right);
883 btrfs_tree_unlock(right);
884 free_extent_buffer(right);
885 right = NULL;
886 wret = del_ptr(trans, root, path, level + 1, pslot +
887 1);
888 if (wret)
889 ret = wret;
890 wret = btrfs_free_extent(trans, root, bytenr,
891 blocksize,
892 btrfs_header_owner(parent),
893 generation, 0, 0, 1);
894 if (wret)
895 ret = wret;
896 } else {
897 struct btrfs_disk_key right_key;
898 btrfs_node_key(right, &right_key, 0);
899 btrfs_set_node_key(parent, &right_key, pslot + 1);
900 btrfs_mark_buffer_dirty(parent);
901 }
902 }
903 if (btrfs_header_nritems(mid) == 1) {
904 /*
905 * we're not allowed to leave a node with one item in the
906 * tree during a delete. A deletion from lower in the tree
907 * could try to delete the only pointer in this node.
908 * So, pull some keys from the left.
909 * There has to be a left pointer at this point because
910 * otherwise we would have pulled some pointers from the
911 * right
912 */
913 BUG_ON(!left);
914 wret = balance_node_right(trans, root, mid, left);
915 if (wret < 0) {
916 ret = wret;
917 goto enospc;
918 }
919 if (wret == 1) {
920 wret = push_node_left(trans, root, left, mid, 1);
921 if (wret < 0)
922 ret = wret;
923 }
924 BUG_ON(wret == 1);
925 }
926 if (btrfs_header_nritems(mid) == 0) {
927 /* we've managed to empty the middle node, drop it */
928 u64 root_gen = btrfs_header_generation(parent);
929 u64 bytenr = mid->start;
930 u32 blocksize = mid->len;
931
932 clean_tree_block(trans, root, mid);
933 btrfs_tree_unlock(mid);
934 free_extent_buffer(mid);
935 mid = NULL;
936 wret = del_ptr(trans, root, path, level + 1, pslot);
937 if (wret)
938 ret = wret;
939 wret = btrfs_free_extent(trans, root, bytenr, blocksize,
940 btrfs_header_owner(parent),
941 root_gen, 0, 0, 1);
942 if (wret)
943 ret = wret;
944 } else {
945 /* update the parent key to reflect our changes */
946 struct btrfs_disk_key mid_key;
947 btrfs_node_key(mid, &mid_key, 0);
948 btrfs_set_node_key(parent, &mid_key, pslot);
949 btrfs_mark_buffer_dirty(parent);
950 }
951
952 /* update the path */
953 if (left) {
954 if (btrfs_header_nritems(left) > orig_slot) {
955 extent_buffer_get(left);
956 /* left was locked after cow */
957 path->nodes[level] = left;
958 path->slots[level + 1] -= 1;
959 path->slots[level] = orig_slot;
960 if (mid) {
961 btrfs_tree_unlock(mid);
962 free_extent_buffer(mid);
963 }
964 } else {
965 orig_slot -= btrfs_header_nritems(left);
966 path->slots[level] = orig_slot;
967 }
968 }
969 /* double check we haven't messed things up */
970 check_block(root, path, level);
971 if (orig_ptr !=
972 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
973 BUG();
974 enospc:
975 if (right) {
976 btrfs_tree_unlock(right);
977 free_extent_buffer(right);
978 }
979 if (left) {
980 if (path->nodes[level] != left)
981 btrfs_tree_unlock(left);
982 free_extent_buffer(left);
983 }
984 return ret;
985 }
986
987 /* returns zero if the push worked, non-zero otherwise */
988 static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans,
989 struct btrfs_root *root,
990 struct btrfs_path *path, int level)
991 {
992 struct extent_buffer *right = NULL;
993 struct extent_buffer *mid;
994 struct extent_buffer *left = NULL;
995 struct extent_buffer *parent = NULL;
996 int ret = 0;
997 int wret;
998 int pslot;
999 int orig_slot = path->slots[level];
1000 u64 orig_ptr;
1001
1002 if (level == 0)
1003 return 1;
1004
1005 mid = path->nodes[level];
1006 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1007 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1008
1009 if (level < BTRFS_MAX_LEVEL - 1)
1010 parent = path->nodes[level + 1];
1011 pslot = path->slots[level + 1];
1012
1013 if (!parent)
1014 return 1;
1015
1016 left = read_node_slot(root, parent, pslot - 1);
1017
1018 /* first, try to make some room in the middle buffer */
1019 if (left) {
1020 u32 left_nr;
1021
1022 btrfs_tree_lock(left);
1023 left_nr = btrfs_header_nritems(left);
1024 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1025 wret = 1;
1026 } else {
1027 ret = btrfs_cow_block(trans, root, left, parent,
1028 pslot - 1, &left);
1029 if (ret)
1030 wret = 1;
1031 else {
1032 wret = push_node_left(trans, root,
1033 left, mid, 0);
1034 }
1035 }
1036 if (wret < 0)
1037 ret = wret;
1038 if (wret == 0) {
1039 struct btrfs_disk_key disk_key;
1040 orig_slot += left_nr;
1041 btrfs_node_key(mid, &disk_key, 0);
1042 btrfs_set_node_key(parent, &disk_key, pslot);
1043 btrfs_mark_buffer_dirty(parent);
1044 if (btrfs_header_nritems(left) > orig_slot) {
1045 path->nodes[level] = left;
1046 path->slots[level + 1] -= 1;
1047 path->slots[level] = orig_slot;
1048 btrfs_tree_unlock(mid);
1049 free_extent_buffer(mid);
1050 } else {
1051 orig_slot -=
1052 btrfs_header_nritems(left);
1053 path->slots[level] = orig_slot;
1054 btrfs_tree_unlock(left);
1055 free_extent_buffer(left);
1056 }
1057 return 0;
1058 }
1059 btrfs_tree_unlock(left);
1060 free_extent_buffer(left);
1061 }
1062 right = read_node_slot(root, parent, pslot + 1);
1063
1064 /*
1065 * then try to empty the right most buffer into the middle
1066 */
1067 if (right) {
1068 u32 right_nr;
1069 btrfs_tree_lock(right);
1070 right_nr = btrfs_header_nritems(right);
1071 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1072 wret = 1;
1073 } else {
1074 ret = btrfs_cow_block(trans, root, right,
1075 parent, pslot + 1,
1076 &right);
1077 if (ret)
1078 wret = 1;
1079 else {
1080 wret = balance_node_right(trans, root,
1081 right, mid);
1082 }
1083 }
1084 if (wret < 0)
1085 ret = wret;
1086 if (wret == 0) {
1087 struct btrfs_disk_key disk_key;
1088
1089 btrfs_node_key(right, &disk_key, 0);
1090 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1091 btrfs_mark_buffer_dirty(parent);
1092
1093 if (btrfs_header_nritems(mid) <= orig_slot) {
1094 path->nodes[level] = right;
1095 path->slots[level + 1] += 1;
1096 path->slots[level] = orig_slot -
1097 btrfs_header_nritems(mid);
1098 btrfs_tree_unlock(mid);
1099 free_extent_buffer(mid);
1100 } else {
1101 btrfs_tree_unlock(right);
1102 free_extent_buffer(right);
1103 }
1104 return 0;
1105 }
1106 btrfs_tree_unlock(right);
1107 free_extent_buffer(right);
1108 }
1109 return 1;
1110 }
1111
1112 /*
1113 * readahead one full node of leaves
1114 */
1115 static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path,
1116 int level, int slot, u64 objectid)
1117 {
1118 struct extent_buffer *node;
1119 struct btrfs_disk_key disk_key;
1120 u32 nritems;
1121 u64 search;
1122 u64 lowest_read;
1123 u64 highest_read;
1124 u64 nread = 0;
1125 int direction = path->reada;
1126 struct extent_buffer *eb;
1127 u32 nr;
1128 u32 blocksize;
1129 u32 nscan = 0;
1130
1131 if (level != 1)
1132 return;
1133
1134 if (!path->nodes[level])
1135 return;
1136
1137 node = path->nodes[level];
1138
1139 search = btrfs_node_blockptr(node, slot);
1140 blocksize = btrfs_level_size(root, level - 1);
1141 eb = btrfs_find_tree_block(root, search, blocksize);
1142 if (eb) {
1143 free_extent_buffer(eb);
1144 return;
1145 }
1146
1147 highest_read = search;
1148 lowest_read = search;
1149
1150 nritems = btrfs_header_nritems(node);
1151 nr = slot;
1152 while (1) {
1153 if (direction < 0) {
1154 if (nr == 0)
1155 break;
1156 nr--;
1157 } else if (direction > 0) {
1158 nr++;
1159 if (nr >= nritems)
1160 break;
1161 }
1162 if (path->reada < 0 && objectid) {
1163 btrfs_node_key(node, &disk_key, nr);
1164 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1165 break;
1166 }
1167 search = btrfs_node_blockptr(node, nr);
1168 if ((search >= lowest_read && search <= highest_read) ||
1169 (search < lowest_read && lowest_read - search <= 32768) ||
1170 (search > highest_read && search - highest_read <= 32768)) {
1171 readahead_tree_block(root, search, blocksize,
1172 btrfs_node_ptr_generation(node, nr));
1173 nread += blocksize;
1174 }
1175 nscan++;
1176 if (path->reada < 2 && (nread > (256 * 1024) || nscan > 32))
1177 break;
1178 if (nread > (1024 * 1024) || nscan > 128)
1179 break;
1180
1181 if (search < lowest_read)
1182 lowest_read = search;
1183 if (search > highest_read)
1184 highest_read = search;
1185 }
1186 }
1187
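/*
 * drop the locks the path holds on levels that are no longer needed,
 * working up from 'level'.  A level stays locked while our slot is the
 * lowest key in the block (a key change could still ripple up to it),
 * and with keep_locks set, while the slot is the last item as well.
 * Nothing below lowest_unlock is touched.
 */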
1188 static void unlock_up(struct btrfs_path *path, int level, int lowest_unlock)
1189 {
1190 int i;
1191 int skip_level = level;
1192 int no_skips = 0;
1193 struct extent_buffer *t;
1194
1195 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1196 if (!path->nodes[i])
1197 break;
1198 if (!path->locks[i])
1199 break;
1200 if (!no_skips && path->slots[i] == 0) {
1201 skip_level = i + 1;
1202 continue;
1203 }
1204 if (!no_skips && path->keep_locks) {
1205 u32 nritems;
1206 t = path->nodes[i];
1207 nritems = btrfs_header_nritems(t);
1208 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1209 skip_level = i + 1;
1210 continue;
1211 }
1212 }
1213 if (skip_level < i && i >= lowest_unlock)
1214 no_skips = 1;
1215
1216 t = path->nodes[i];
1217 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1218 btrfs_tree_unlock(t);
1219 path->locks[i] = 0;
1220 }
1221 }
1222 }
1223
1224 /*
1225 * look for key in the tree.  path is filled in with nodes along the way.
1226 * If key is found, we return zero and you can find the item in the leaf
1227 * level of the path (level 0).
1228 *
1229 * If the key isn't found, the path points to the slot where it should
1230 * be inserted, and 1 is returned. If there are other errors during the
1231 * search a negative error number is returned.
1232 *
1233 * if ins_len > 0, nodes and leaves will be split as we walk down the
1234 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1235 * possible)
1236 */
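/*
 * A minimal usage sketch (not taken from any specific caller): a
 * read-only lookup allocates a path, searches with a NULL transaction,
 * cow == 0 and ins_len == 0, reads the result out of the leaf at level 0,
 * and then releases the path:
 *
 *	path = btrfs_alloc_path();
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0)
 *		leaf = path->nodes[0];  (item is at path->slots[0])
 *	btrfs_free_path(path);
 *
 * Writers pass a transaction handle, cow == 1 and a positive ins_len so
 * blocks are COWed and split on the way down.
 */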
1237 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1238 *root, struct btrfs_key *key, struct btrfs_path *p, int
1239 ins_len, int cow)
1240 {
1241 struct extent_buffer *b;
1242 struct extent_buffer *tmp;
1243 int slot;
1244 int ret;
1245 int level;
1246 int should_reada = p->reada;
1247 int lowest_unlock = 1;
1248 u8 lowest_level = 0;
1249
1250 lowest_level = p->lowest_level;
1251 WARN_ON(lowest_level && ins_len);
1252 WARN_ON(p->nodes[0] != NULL);
1253 WARN_ON(root == root->fs_info->extent_root &&
1254 !mutex_is_locked(&root->fs_info->alloc_mutex));
1255 WARN_ON(root == root->fs_info->chunk_root &&
1256 !mutex_is_locked(&root->fs_info->chunk_mutex));
1257 WARN_ON(root == root->fs_info->dev_root &&
1258 !mutex_is_locked(&root->fs_info->chunk_mutex));
1259 if (ins_len < 0)
1260 lowest_unlock = 2;
1261 again:
1262 b = btrfs_lock_root_node(root);
1263
1264 while (b) {
1265 level = btrfs_header_level(b);
1266 if (cow) {
1267 int wret;
1268 wret = btrfs_cow_block(trans, root, b,
1269 p->nodes[level + 1],
1270 p->slots[level + 1],
1271 &b);
1272 if (wret) {
1273 free_extent_buffer(b);
1274 return wret;
1275 }
1276 }
1277 BUG_ON(!cow && ins_len);
1278 if (level != btrfs_header_level(b))
1279 WARN_ON(1);
1280 level = btrfs_header_level(b);
1281 p->nodes[level] = b;
1282 p->locks[level] = 1;
1283 ret = check_block(root, p, level);
1284 if (ret)
1285 return -1;
1286
1287 ret = bin_search(b, key, level, &slot);
1288 if (level != 0) {
1289 if (ret && slot > 0)
1290 slot -= 1;
1291 p->slots[level] = slot;
1292 if (ins_len > 0 && btrfs_header_nritems(b) >=
1293 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1294 int sret = split_node(trans, root, p, level);
1295 BUG_ON(sret > 0);
1296 if (sret)
1297 return sret;
1298 b = p->nodes[level];
1299 slot = p->slots[level];
1300 } else if (ins_len < 0) {
1301 int sret = balance_level(trans, root, p,
1302 level);
1303 if (sret)
1304 return sret;
1305 b = p->nodes[level];
1306 if (!b) {
1307 btrfs_release_path(NULL, p);
1308 goto again;
1309 }
1310 slot = p->slots[level];
1311 BUG_ON(btrfs_header_nritems(b) == 1);
1312 }
1313 /* this is only true while dropping a snapshot */
1314 if (level == lowest_level) {
1315 unlock_up(p, level, lowest_unlock);
1316 break;
1317 }
1318
1319 if (should_reada)
1320 reada_for_search(root, p, level, slot,
1321 key->objectid);
1322
1323 tmp = btrfs_find_tree_block(root,
1324 btrfs_node_blockptr(b, slot),
1325 btrfs_level_size(root, level - 1));
1326 if (tmp && btrfs_buffer_uptodate(tmp,
1327 btrfs_node_ptr_generation(b, slot))) {
1328 b = tmp;
1329 } else {
1330 /*
1331 * reduce lock contention at high levels
1332 * of the btree by dropping locks before
1333 * we read.
1334 */
1335 if (level > 1) {
1336 btrfs_release_path(NULL, p);
1337 if (tmp)
1338 free_extent_buffer(tmp);
1339 goto again;
1340 } else {
1341 b = read_node_slot(root, b, slot);
1342 }
1343 }
1344 btrfs_tree_lock(b);
1345 unlock_up(p, level, lowest_unlock);
1346 } else {
1347 p->slots[level] = slot;
1348 if (ins_len > 0 && btrfs_leaf_free_space(root, b) <
1349 sizeof(struct btrfs_item) + ins_len) {
1350 int sret = split_leaf(trans, root, key,
1351 p, ins_len, ret == 0);
1352 BUG_ON(sret > 0);
1353 if (sret)
1354 return sret;
1355 }
1356 unlock_up(p, level, lowest_unlock);
1357 return ret;
1358 }
1359 }
1360 return 1;
1361 }
1362
1363 /*
1364 * adjust the pointers going up the tree, starting at level
1365 * making sure the right key of each node points to 'key'.
1366 * This is used after shifting pointers to the left, so it stops
1367 * fixing up pointers when a given leaf/node is not in slot 0 of the
1368 * higher levels
1369 *
1370 * If this fails to write a tree block, it returns -1, but continues
1371 * fixing up the blocks in ram so the tree is consistent.
1372 */
1373 static int fixup_low_keys(struct btrfs_trans_handle *trans,
1374 struct btrfs_root *root, struct btrfs_path *path,
1375 struct btrfs_disk_key *key, int level)
1376 {
1377 int i;
1378 int ret = 0;
1379 struct extent_buffer *t;
1380
1381 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1382 int tslot = path->slots[i];
1383 if (!path->nodes[i])
1384 break;
1385 t = path->nodes[i];
1386 btrfs_set_node_key(t, key, tslot);
1387 if (!btrfs_tree_locked(path->nodes[i])) {
1388 int ii;
1389 printk("fixup without lock on level %d\n", btrfs_header_level(path->nodes[i]));
1390 for (ii = 0; ii < BTRFS_MAX_LEVEL; ii++) {
1391 printk("level %d slot %d\n", ii, path->slots[ii]);
1392 }
1393 }
1394 btrfs_mark_buffer_dirty(path->nodes[i]);
1395 if (tslot != 0)
1396 break;
1397 }
1398 return ret;
1399 }
1400
1401 /*
1402 * try to push data from one node into the next node left in the
1403 * tree.
1404 *
1405 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1406 * error, and > 0 if there was no room in the left hand block.
1407 */
1408 static int push_node_left(struct btrfs_trans_handle *trans,
1409 struct btrfs_root *root, struct extent_buffer *dst,
1410 struct extent_buffer *src, int empty)
1411 {
1412 int push_items = 0;
1413 int src_nritems;
1414 int dst_nritems;
1415 int ret = 0;
1416
1417 src_nritems = btrfs_header_nritems(src);
1418 dst_nritems = btrfs_header_nritems(dst);
1419 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1420 WARN_ON(btrfs_header_generation(src) != trans->transid);
1421 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1422
1423 if (!empty && src_nritems <= 8)
1424 return 1;
1425
1426 if (push_items <= 0) {
1427 return 1;
1428 }
1429
1430 if (empty) {
1431 push_items = min(src_nritems, push_items);
1432 if (push_items < src_nritems) {
1433 /* leave at least 8 pointers in the node if
1434 * we aren't going to empty it
1435 */
1436 if (src_nritems - push_items < 8) {
1437 if (push_items <= 8)
1438 return 1;
1439 push_items -= 8;
1440 }
1441 }
1442 } else
1443 push_items = min(src_nritems - 8, push_items);
1444
1445 copy_extent_buffer(dst, src,
1446 btrfs_node_key_ptr_offset(dst_nritems),
1447 btrfs_node_key_ptr_offset(0),
1448 push_items * sizeof(struct btrfs_key_ptr));
1449
1450 if (push_items < src_nritems) {
1451 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1452 btrfs_node_key_ptr_offset(push_items),
1453 (src_nritems - push_items) *
1454 sizeof(struct btrfs_key_ptr));
1455 }
1456 btrfs_set_header_nritems(src, src_nritems - push_items);
1457 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1458 btrfs_mark_buffer_dirty(src);
1459 btrfs_mark_buffer_dirty(dst);
1460 return ret;
1461 }
1462
1463 /*
1464 * try to push data from one node into the next node right in the
1465 * tree.
1466 *
1467 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
1468 * error, and > 0 if there was no room in the right hand block.
1469 *
1470 * this will only push up to 1/2 the contents of the left node over
1471 */
1472 static int balance_node_right(struct btrfs_trans_handle *trans,
1473 struct btrfs_root *root,
1474 struct extent_buffer *dst,
1475 struct extent_buffer *src)
1476 {
1477 int push_items = 0;
1478 int max_push;
1479 int src_nritems;
1480 int dst_nritems;
1481 int ret = 0;
1482
1483 WARN_ON(btrfs_header_generation(src) != trans->transid);
1484 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1485
1486 src_nritems = btrfs_header_nritems(src);
1487 dst_nritems = btrfs_header_nritems(dst);
1488 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1489 if (push_items <= 0) {
1490 return 1;
1491 }
1492
1493 if (src_nritems < 4) {
1494 return 1;
1495 }
1496
1497 max_push = src_nritems / 2 + 1;
1498 /* don't try to empty the node */
1499 if (max_push >= src_nritems) {
1500 return 1;
1501 }
1502
1503 if (max_push < push_items)
1504 push_items = max_push;
1505
1506 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
1507 btrfs_node_key_ptr_offset(0),
1508 (dst_nritems) *
1509 sizeof(struct btrfs_key_ptr));
1510
1511 copy_extent_buffer(dst, src,
1512 btrfs_node_key_ptr_offset(0),
1513 btrfs_node_key_ptr_offset(src_nritems - push_items),
1514 push_items * sizeof(struct btrfs_key_ptr));
1515
1516 btrfs_set_header_nritems(src, src_nritems - push_items);
1517 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1518
1519 btrfs_mark_buffer_dirty(src);
1520 btrfs_mark_buffer_dirty(dst);
1521 return ret;
1522 }
1523
1524 /*
1525 * helper function to insert a new root level in the tree.
1526 * A new node is allocated, and a single item is inserted to
1527 * point to the existing root
1528 *
1529 * returns zero on success or < 0 on failure.
1530 */
1531 static int noinline insert_new_root(struct btrfs_trans_handle *trans,
1532 struct btrfs_root *root,
1533 struct btrfs_path *path, int level)
1534 {
1535 u64 root_gen;
1536 u64 lower_gen;
1537 struct extent_buffer *lower;
1538 struct extent_buffer *c;
1539 struct extent_buffer *old;
1540 struct btrfs_disk_key lower_key;
1541
1542 BUG_ON(path->nodes[level]);
1543 BUG_ON(path->nodes[level-1] != root->node);
1544
1545 if (root->ref_cows)
1546 root_gen = trans->transid;
1547 else
1548 root_gen = 0;
1549
1550 lower = path->nodes[level-1];
1551 if (level == 1)
1552 btrfs_item_key(lower, &lower_key, 0);
1553 else
1554 btrfs_node_key(lower, &lower_key, 0);
1555
1556 c = btrfs_alloc_free_block(trans, root, root->nodesize,
1557 root->root_key.objectid,
1558 root_gen, lower_key.objectid, level,
1559 root->node->start, 0);
1560 if (IS_ERR(c))
1561 return PTR_ERR(c);
1562
1563 memset_extent_buffer(c, 0, 0, root->nodesize);
1564 btrfs_set_header_nritems(c, 1);
1565 btrfs_set_header_level(c, level);
1566 btrfs_set_header_bytenr(c, c->start);
1567 btrfs_set_header_generation(c, trans->transid);
1568 btrfs_set_header_owner(c, root->root_key.objectid);
1569
1570 write_extent_buffer(c, root->fs_info->fsid,
1571 (unsigned long)btrfs_header_fsid(c),
1572 BTRFS_FSID_SIZE);
1573
1574 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
1575 (unsigned long)btrfs_header_chunk_tree_uuid(c),
1576 BTRFS_UUID_SIZE);
1577
1578 btrfs_set_node_key(c, &lower_key, 0);
1579 btrfs_set_node_blockptr(c, 0, lower->start);
1580 lower_gen = btrfs_header_generation(lower);
1581 WARN_ON(lower_gen == 0);
1582
1583 btrfs_set_node_ptr_generation(c, 0, lower_gen);
1584
1585 btrfs_mark_buffer_dirty(c);
1586
1587 spin_lock(&root->node_lock);
1588 old = root->node;
1589 root->node = c;
1590 spin_unlock(&root->node_lock);
1591
1592 /* the super has an extra ref to root->node */
1593 free_extent_buffer(old);
1594
1595 add_root_to_dirty_list(root);
1596 extent_buffer_get(c);
1597 path->nodes[level] = c;
1598 path->locks[level] = 1;
1599 path->slots[level] = 0;
1600
1601 if (root->ref_cows && lower_gen != trans->transid) {
1602 struct btrfs_path *back_path = btrfs_alloc_path();
1603 int ret;
1604 mutex_lock(&root->fs_info->alloc_mutex);
1605 ret = btrfs_insert_extent_backref(trans,
1606 root->fs_info->extent_root,
1607 path, lower->start,
1608 root->root_key.objectid,
1609 trans->transid, 0, 0);
1610 BUG_ON(ret);
1611 mutex_unlock(&root->fs_info->alloc_mutex);
1612 btrfs_free_path(back_path);
1613 }
1614 return 0;
1615 }
1616
1617 /*
1618 * worker function to insert a single pointer in a node.
1619 * the node should have enough room for the pointer already
1620 *
1621 * slot and level indicate where you want the key to go, and
1622 * blocknr is the block the key points to.
1623 *
1624 * returns zero on success and < 0 on any error
1625 */
1626 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
1627 *root, struct btrfs_path *path, struct btrfs_disk_key
1628 *key, u64 bytenr, int slot, int level)
1629 {
1630 struct extent_buffer *lower;
1631 int nritems;
1632
1633 BUG_ON(!path->nodes[level]);
1634 lower = path->nodes[level];
1635 nritems = btrfs_header_nritems(lower);
1636 if (slot > nritems)
1637 BUG();
1638 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
1639 BUG();
1640 if (slot != nritems) {
1641 memmove_extent_buffer(lower,
1642 btrfs_node_key_ptr_offset(slot + 1),
1643 btrfs_node_key_ptr_offset(slot),
1644 (nritems - slot) * sizeof(struct btrfs_key_ptr));
1645 }
1646 btrfs_set_node_key(lower, key, slot);
1647 btrfs_set_node_blockptr(lower, slot, bytenr);
1648 WARN_ON(trans->transid == 0);
1649 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
1650 btrfs_set_header_nritems(lower, nritems + 1);
1651 btrfs_mark_buffer_dirty(lower);
1652 return 0;
1653 }
1654
1655 /*
1656 * split the node at the specified level in path in two.
1657 * The path is corrected to point to the appropriate node after the split
1658 *
1659 * Before splitting this tries to make some room in the node by pushing
1660 * left and right; if either one works, it returns right away.
1661 *
1662 * returns 0 on success and < 0 on failure
1663 */
1664 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
1665 *root, struct btrfs_path *path, int level)
1666 {
1667 u64 root_gen;
1668 struct extent_buffer *c;
1669 struct extent_buffer *split;
1670 struct btrfs_disk_key disk_key;
1671 int mid;
1672 int ret;
1673 int wret;
1674 u32 c_nritems;
1675
1676 c = path->nodes[level];
1677 WARN_ON(btrfs_header_generation(c) != trans->transid);
1678 if (c == root->node) {
1679 /* trying to split the root, lets make a new one */
1680 ret = insert_new_root(trans, root, path, level + 1);
1681 if (ret)
1682 return ret;
1683 } else {
1684 ret = push_nodes_for_insert(trans, root, path, level);
1685 c = path->nodes[level];
1686 if (!ret && btrfs_header_nritems(c) <
1687 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
1688 return 0;
1689 if (ret < 0)
1690 return ret;
1691 }
1692
1693 c_nritems = btrfs_header_nritems(c);
1694 if (root->ref_cows)
1695 root_gen = trans->transid;
1696 else
1697 root_gen = 0;
1698
1699 btrfs_node_key(c, &disk_key, 0);
1700 split = btrfs_alloc_free_block(trans, root, root->nodesize,
1701 root->root_key.objectid,
1702 root_gen,
1703 btrfs_disk_key_objectid(&disk_key),
1704 level, c->start, 0);
1705 if (IS_ERR(split))
1706 return PTR_ERR(split);
1707
1708 btrfs_set_header_flags(split, btrfs_header_flags(c));
1709 btrfs_set_header_level(split, btrfs_header_level(c));
1710 btrfs_set_header_bytenr(split, split->start);
1711 btrfs_set_header_generation(split, trans->transid);
1712 btrfs_set_header_owner(split, root->root_key.objectid);
1713 btrfs_set_header_flags(split, 0);
1714 write_extent_buffer(split, root->fs_info->fsid,
1715 (unsigned long)btrfs_header_fsid(split),
1716 BTRFS_FSID_SIZE);
1717 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
1718 (unsigned long)btrfs_header_chunk_tree_uuid(split),
1719 BTRFS_UUID_SIZE);
1720
1721 mid = (c_nritems + 1) / 2;
1722
1723 copy_extent_buffer(split, c,
1724 btrfs_node_key_ptr_offset(0),
1725 btrfs_node_key_ptr_offset(mid),
1726 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
1727 btrfs_set_header_nritems(split, c_nritems - mid);
1728 btrfs_set_header_nritems(c, mid);
1729 ret = 0;
1730
1731 btrfs_mark_buffer_dirty(c);
1732 btrfs_mark_buffer_dirty(split);
1733
1734 btrfs_node_key(split, &disk_key, 0);
1735 wret = insert_ptr(trans, root, path, &disk_key, split->start,
1736 path->slots[level + 1] + 1,
1737 level + 1);
1738 if (wret)
1739 ret = wret;
1740
1741 if (path->slots[level] >= mid) {
1742 path->slots[level] -= mid;
1743 btrfs_tree_unlock(c);
1744 free_extent_buffer(c);
1745 path->nodes[level] = split;
1746 path->slots[level + 1] += 1;
1747 } else {
1748 btrfs_tree_unlock(split);
1749 free_extent_buffer(split);
1750 }
1751 return ret;
1752 }
1753
1754 /*
1755 * how many bytes are required to store the items in a leaf. start
1756 * and nr indicate which items in the leaf to check. This totals up the
1757 * space used both by the item structs and the item data
1758 */
1759 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
1760 {
1761 int data_len;
1762 int nritems = btrfs_header_nritems(l);
1763 int end = min(nritems, start + nr) - 1;
1764
1765 if (!nr)
1766 return 0;
1767 data_len = btrfs_item_end_nr(l, start);
1768 data_len = data_len - btrfs_item_offset_nr(l, end);
1769 data_len += sizeof(struct btrfs_item) * nr;
1770 WARN_ON(data_len < 0);
1771 return data_len;
1772 }
1773
1774 /*
1775 * The space between the end of the leaf items and
1776 * the start of the leaf data. IOW, how much room
1777 * the leaf has left for both items and data
1778 */
1779 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf)
1780 {
1781 int nritems = btrfs_header_nritems(leaf);
1782 int ret;
1783 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
1784 if (ret < 0) {
1785 printk("leaf free space ret %d, leaf data size %lu, used %d nritems %d\n",
1786 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
1787 leaf_space_used(leaf, 0, nritems), nritems);
1788 }
1789 return ret;
1790 }
1791
1792 /*
1793 * push some data in the path leaf to the right, trying to free up at
1794 * least data_size bytes. returns zero if the push worked, nonzero otherwise
1795 *
1796 * returns 1 if the push failed because the other node didn't have enough
1797 * room, 0 if everything worked out and < 0 if there were major errors.
1798 */
1799 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
1800 *root, struct btrfs_path *path, int data_size,
1801 int empty)
1802 {
1803 struct extent_buffer *left = path->nodes[0];
1804 struct extent_buffer *right;
1805 struct extent_buffer *upper;
1806 struct btrfs_disk_key disk_key;
1807 int slot;
1808 u32 i;
1809 int free_space;
1810 int push_space = 0;
1811 int push_items = 0;
1812 struct btrfs_item *item;
1813 u32 left_nritems;
1814 u32 nr;
1815 u32 right_nritems;
1816 u32 data_end;
1817 u32 this_item_size;
1818 int ret;
1819
1820 slot = path->slots[1];
1821 if (!path->nodes[1]) {
1822 return 1;
1823 }
1824 upper = path->nodes[1];
1825 if (slot >= btrfs_header_nritems(upper) - 1)
1826 return 1;
1827
1828 WARN_ON(!btrfs_tree_locked(path->nodes[1]));
1829
1830 right = read_node_slot(root, upper, slot + 1);
1831 btrfs_tree_lock(right);
1832 free_space = btrfs_leaf_free_space(root, right);
1833 if (free_space < data_size + sizeof(struct btrfs_item))
1834 goto out_unlock;
1835
1836 /* cow and double check */
1837 ret = btrfs_cow_block(trans, root, right, upper,
1838 slot + 1, &right);
1839 if (ret)
1840 goto out_unlock;
1841
1842 free_space = btrfs_leaf_free_space(root, right);
1843 if (free_space < data_size + sizeof(struct btrfs_item))
1844 goto out_unlock;
1845
1846 left_nritems = btrfs_header_nritems(left);
1847 if (left_nritems == 0)
1848 goto out_unlock;
1849
1850 if (empty)
1851 nr = 0;
1852 else
1853 nr = 1;
1854
1855 i = left_nritems - 1;
1856 while (i >= nr) {
1857 item = btrfs_item_nr(left, i);
1858
1859 if (path->slots[0] == i)
1860 push_space += data_size + sizeof(*item);
1861
1862 if (!left->map_token) {
1863 map_extent_buffer(left, (unsigned long)item,
1864 sizeof(struct btrfs_item),
1865 &left->map_token, &left->kaddr,
1866 &left->map_start, &left->map_len,
1867 KM_USER1);
1868 }
1869
1870 this_item_size = btrfs_item_size(left, item);
1871 if (this_item_size + sizeof(*item) + push_space > free_space)
1872 break;
1873 push_items++;
1874 push_space += this_item_size + sizeof(*item);
1875 if (i == 0)
1876 break;
1877 i--;
1878 }
1879 if (left->map_token) {
1880 unmap_extent_buffer(left, left->map_token, KM_USER1);
1881 left->map_token = NULL;
1882 }
1883
1884 if (push_items == 0)
1885 goto out_unlock;
1886
1887 if (!empty && push_items == left_nritems)
1888 WARN_ON(1);
1889
1890 /* push left to right */
1891 right_nritems = btrfs_header_nritems(right);
1892
1893 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
1894 push_space -= leaf_data_end(root, left);
1895
1896 /* make room in the right data area */
1897 data_end = leaf_data_end(root, right);
1898 memmove_extent_buffer(right,
1899 btrfs_leaf_data(right) + data_end - push_space,
1900 btrfs_leaf_data(right) + data_end,
1901 BTRFS_LEAF_DATA_SIZE(root) - data_end);
1902
1903 /* copy from the left data area */
1904 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
1905 BTRFS_LEAF_DATA_SIZE(root) - push_space,
1906 btrfs_leaf_data(left) + leaf_data_end(root, left),
1907 push_space);
1908
1909 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
1910 btrfs_item_nr_offset(0),
1911 right_nritems * sizeof(struct btrfs_item));
1912
1913 /* copy the items from left to right */
1914 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
1915 btrfs_item_nr_offset(left_nritems - push_items),
1916 push_items * sizeof(struct btrfs_item));
1917
1918 /* update the item pointers */
1919 right_nritems += push_items;
1920 btrfs_set_header_nritems(right, right_nritems);
1921 push_space = BTRFS_LEAF_DATA_SIZE(root);
1922 for (i = 0; i < right_nritems; i++) {
1923 item = btrfs_item_nr(right, i);
1924 if (!right->map_token) {
1925 map_extent_buffer(right, (unsigned long)item,
1926 sizeof(struct btrfs_item),
1927 &right->map_token, &right->kaddr,
1928 &right->map_start, &right->map_len,
1929 KM_USER1);
1930 }
1931 push_space -= btrfs_item_size(right, item);
1932 btrfs_set_item_offset(right, item, push_space);
1933 }
1934
1935 if (right->map_token) {
1936 unmap_extent_buffer(right, right->map_token, KM_USER1);
1937 right->map_token = NULL;
1938 }
1939 left_nritems -= push_items;
1940 btrfs_set_header_nritems(left, left_nritems);
1941
1942 if (left_nritems)
1943 btrfs_mark_buffer_dirty(left);
1944 btrfs_mark_buffer_dirty(right);
1945
1946 btrfs_item_key(right, &disk_key, 0);
1947 btrfs_set_node_key(upper, &disk_key, slot + 1);
1948 btrfs_mark_buffer_dirty(upper);
1949
1950 /* then fixup the leaf pointer in the path */
1951 if (path->slots[0] >= left_nritems) {
1952 path->slots[0] -= left_nritems;
1953 if (btrfs_header_nritems(path->nodes[0]) == 0)
1954 clean_tree_block(trans, root, path->nodes[0]);
1955 btrfs_tree_unlock(path->nodes[0]);
1956 free_extent_buffer(path->nodes[0]);
1957 path->nodes[0] = right;
1958 path->slots[1] += 1;
1959 } else {
1960 btrfs_tree_unlock(right);
1961 free_extent_buffer(right);
1962 }
1963 return 0;
1964
1965 out_unlock:
1966 btrfs_tree_unlock(right);
1967 free_extent_buffer(right);
1968 return 1;
1969 }
1970
1971 /*
1972 * push some data in the path leaf to the left, trying to free up at
1973 * least data_size bytes. returns zero if the push worked, nonzero otherwise
1974 */
1975 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
1976 *root, struct btrfs_path *path, int data_size,
1977 int empty)
1978 {
1979 struct btrfs_disk_key disk_key;
1980 struct extent_buffer *right = path->nodes[0];
1981 struct extent_buffer *left;
1982 int slot;
1983 int i;
1984 int free_space;
1985 int push_space = 0;
1986 int push_items = 0;
1987 struct btrfs_item *item;
1988 u32 old_left_nritems;
1989 u32 right_nritems;
1990 u32 nr;
1991 int ret = 0;
1992 int wret;
1993 u32 this_item_size;
1994 u32 old_left_item_size;
1995
1996 slot = path->slots[1];
1997 if (slot == 0)
1998 return 1;
1999 if (!path->nodes[1])
2000 return 1;
2001
2002 right_nritems = btrfs_header_nritems(right);
2003 if (right_nritems == 0) {
2004 return 1;
2005 }
2006
2007 WARN_ON(!btrfs_tree_locked(path->nodes[1]));
2008
2009 left = read_node_slot(root, path->nodes[1], slot - 1);
2010 btrfs_tree_lock(left);
2011 free_space = btrfs_leaf_free_space(root, left);
2012 if (free_space < data_size + sizeof(struct btrfs_item)) {
2013 ret = 1;
2014 goto out;
2015 }
2016
2017 /* cow and double check */
2018 ret = btrfs_cow_block(trans, root, left,
2019 path->nodes[1], slot - 1, &left);
2020 if (ret) {
2021 /* we hit -ENOSPC, but it isn't fatal here */
2022 ret = 1;
2023 goto out;
2024 }
2025
2026 free_space = btrfs_leaf_free_space(root, left);
2027 if (free_space < data_size + sizeof(struct btrfs_item)) {
2028 ret = 1;
2029 goto out;
2030 }
2031
2032 if (empty)
2033 nr = right_nritems;
2034 else
2035 nr = right_nritems - 1;
2036
2037 for (i = 0; i < nr; i++) {
2038 item = btrfs_item_nr(right, i);
2039 if (!right->map_token) {
2040 map_extent_buffer(right, (unsigned long)item,
2041 sizeof(struct btrfs_item),
2042 &right->map_token, &right->kaddr,
2043 &right->map_start, &right->map_len,
2044 KM_USER1);
2045 }
2046
2047 if (path->slots[0] == i)
2048 push_space += data_size + sizeof(*item);
2049
2050 this_item_size = btrfs_item_size(right, item);
2051 if (this_item_size + sizeof(*item) + push_space > free_space)
2052 break;
2053
2054 push_items++;
2055 push_space += this_item_size + sizeof(*item);
2056 }
2057
2058 if (right->map_token) {
2059 unmap_extent_buffer(right, right->map_token, KM_USER1);
2060 right->map_token = NULL;
2061 }
2062
2063 if (push_items == 0) {
2064 ret = 1;
2065 goto out;
2066 }
2067 if (!empty && push_items == btrfs_header_nritems(right))
2068 WARN_ON(1);
2069
2070 /* push data from right to left */
2071 copy_extent_buffer(left, right,
2072 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2073 btrfs_item_nr_offset(0),
2074 push_items * sizeof(struct btrfs_item));
2075
2076 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2077 		btrfs_item_offset_nr(right, push_items - 1);
2078
2079 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2080 leaf_data_end(root, left) - push_space,
2081 btrfs_leaf_data(right) +
2082 btrfs_item_offset_nr(right, push_items - 1),
2083 push_space);
2084 old_left_nritems = btrfs_header_nritems(left);
2085 	BUG_ON(old_left_nritems == 0);
2086
2087 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2088 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2089 u32 ioff;
2090
2091 item = btrfs_item_nr(left, i);
2092 if (!left->map_token) {
2093 map_extent_buffer(left, (unsigned long)item,
2094 sizeof(struct btrfs_item),
2095 &left->map_token, &left->kaddr,
2096 &left->map_start, &left->map_len,
2097 KM_USER1);
2098 }
2099
2100 ioff = btrfs_item_offset(left, item);
2101 btrfs_set_item_offset(left, item,
2102 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2103 }
2104 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2105 if (left->map_token) {
2106 unmap_extent_buffer(left, left->map_token, KM_USER1);
2107 left->map_token = NULL;
2108 }
2109
2110 /* fixup right node */
2111 if (push_items > right_nritems) {
2112 printk("push items %d nr %u\n", push_items, right_nritems);
2113 WARN_ON(1);
2114 }
2115
2116 if (push_items < right_nritems) {
2117 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2118 leaf_data_end(root, right);
2119 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2120 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2121 btrfs_leaf_data(right) +
2122 leaf_data_end(root, right), push_space);
2123
2124 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2125 btrfs_item_nr_offset(push_items),
2126 (btrfs_header_nritems(right) - push_items) *
2127 sizeof(struct btrfs_item));
2128 }
2129 right_nritems -= push_items;
2130 btrfs_set_header_nritems(right, right_nritems);
2131 push_space = BTRFS_LEAF_DATA_SIZE(root);
2132 for (i = 0; i < right_nritems; i++) {
2133 item = btrfs_item_nr(right, i);
2134
2135 if (!right->map_token) {
2136 map_extent_buffer(right, (unsigned long)item,
2137 sizeof(struct btrfs_item),
2138 &right->map_token, &right->kaddr,
2139 &right->map_start, &right->map_len,
2140 KM_USER1);
2141 }
2142
2143 push_space = push_space - btrfs_item_size(right, item);
2144 btrfs_set_item_offset(right, item, push_space);
2145 }
2146 if (right->map_token) {
2147 unmap_extent_buffer(right, right->map_token, KM_USER1);
2148 right->map_token = NULL;
2149 }
2150
2151 btrfs_mark_buffer_dirty(left);
2152 if (right_nritems)
2153 btrfs_mark_buffer_dirty(right);
2154
2155 btrfs_item_key(right, &disk_key, 0);
2156 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2157 if (wret)
2158 ret = wret;
2159
2160 /* then fixup the leaf pointer in the path */
2161 if (path->slots[0] < push_items) {
2162 path->slots[0] += old_left_nritems;
2163 if (btrfs_header_nritems(path->nodes[0]) == 0)
2164 clean_tree_block(trans, root, path->nodes[0]);
2165 btrfs_tree_unlock(path->nodes[0]);
2166 free_extent_buffer(path->nodes[0]);
2167 path->nodes[0] = left;
2168 path->slots[1] -= 1;
2169 } else {
2170 btrfs_tree_unlock(left);
2171 free_extent_buffer(left);
2172 path->slots[0] -= push_items;
2173 }
2174 BUG_ON(path->slots[0] < 0);
2175 return ret;
2176 out:
2177 btrfs_tree_unlock(left);
2178 free_extent_buffer(left);
2179 return ret;
2180 }
2181
2182 /*
2183  * split the path's leaf in two, making sure there are at least data_size bytes
2184 * available for the resulting leaf level of the path.
2185 *
2186 * returns 0 if all went well and < 0 on failure.
2187 */
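/*
 * for most keys the code first tries to make room by pushing items into
 * the left and right neighbors; if the chosen split point still does not
 * leave enough free space, the leaf holding the insertion slot is split a
 * second time (the double_split case).
 */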
2188 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
2189 *root, struct btrfs_key *ins_key,
2190 struct btrfs_path *path, int data_size, int extend)
2191 {
2192 u64 root_gen;
2193 struct extent_buffer *l;
2194 u32 nritems;
2195 int mid;
2196 int slot;
2197 struct extent_buffer *right;
2198 int space_needed = data_size + sizeof(struct btrfs_item);
2199 int data_copy_size;
2200 int rt_data_off;
2201 int i;
2202 int ret = 0;
2203 int wret;
2204 int double_split;
2205 int num_doubles = 0;
2206 struct btrfs_disk_key disk_key;
2207
2208 if (extend)
2209 space_needed = data_size;
2210
2211 if (root->ref_cows)
2212 root_gen = trans->transid;
2213 else
2214 root_gen = 0;
2215
2216 /* first try to make some room by pushing left and right */
2217 if (ins_key->type != BTRFS_DIR_ITEM_KEY) {
2218 wret = push_leaf_right(trans, root, path, data_size, 0);
2219 if (wret < 0) {
2220 return wret;
2221 }
2222 if (wret) {
2223 wret = push_leaf_left(trans, root, path, data_size, 0);
2224 if (wret < 0)
2225 return wret;
2226 }
2227 l = path->nodes[0];
2228
2229 /* did the pushes work? */
2230 if (btrfs_leaf_free_space(root, l) >= space_needed)
2231 return 0;
2232 }
2233
2234 if (!path->nodes[1]) {
2235 ret = insert_new_root(trans, root, path, 1);
2236 if (ret)
2237 return ret;
2238 }
2239 again:
2240 double_split = 0;
2241 l = path->nodes[0];
2242 slot = path->slots[0];
2243 nritems = btrfs_header_nritems(l);
2244 	mid = (nritems + 1) / 2;
2245
2246 btrfs_item_key(l, &disk_key, 0);
2247
2248 right = btrfs_alloc_free_block(trans, root, root->leafsize,
2249 root->root_key.objectid,
2250 root_gen, disk_key.objectid, 0,
2251 l->start, 0);
2252 if (IS_ERR(right)) {
2253 BUG_ON(1);
2254 return PTR_ERR(right);
2255 }
2256
2257 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2258 btrfs_set_header_bytenr(right, right->start);
2259 btrfs_set_header_generation(right, trans->transid);
2260 btrfs_set_header_owner(right, root->root_key.objectid);
2261 btrfs_set_header_level(right, 0);
2262 write_extent_buffer(right, root->fs_info->fsid,
2263 (unsigned long)btrfs_header_fsid(right),
2264 BTRFS_FSID_SIZE);
2265
2266 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2267 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2268 BTRFS_UUID_SIZE);
2269 if (mid <= slot) {
2270 if (nritems == 1 ||
2271 leaf_space_used(l, mid, nritems - mid) + space_needed >
2272 BTRFS_LEAF_DATA_SIZE(root)) {
2273 if (slot >= nritems) {
2274 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2275 btrfs_set_header_nritems(right, 0);
2276 wret = insert_ptr(trans, root, path,
2277 &disk_key, right->start,
2278 path->slots[1] + 1, 1);
2279 if (wret)
2280 ret = wret;
2281
2282 btrfs_tree_unlock(path->nodes[0]);
2283 free_extent_buffer(path->nodes[0]);
2284 path->nodes[0] = right;
2285 path->slots[0] = 0;
2286 path->slots[1] += 1;
2287 btrfs_mark_buffer_dirty(right);
2288 return ret;
2289 }
2290 mid = slot;
2291 if (mid != nritems &&
2292 leaf_space_used(l, mid, nritems - mid) +
2293 space_needed > BTRFS_LEAF_DATA_SIZE(root)) {
2294 double_split = 1;
2295 }
2296 }
2297 } else {
2298 if (leaf_space_used(l, 0, mid + 1) + space_needed >
2299 BTRFS_LEAF_DATA_SIZE(root)) {
2300 if (!extend && slot == 0) {
2301 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2302 btrfs_set_header_nritems(right, 0);
2303 wret = insert_ptr(trans, root, path,
2304 &disk_key,
2305 right->start,
2306 path->slots[1], 1);
2307 if (wret)
2308 ret = wret;
2309 btrfs_tree_unlock(path->nodes[0]);
2310 free_extent_buffer(path->nodes[0]);
2311 path->nodes[0] = right;
2312 path->slots[0] = 0;
2313 if (path->slots[1] == 0) {
2314 wret = fixup_low_keys(trans, root,
2315 path, &disk_key, 1);
2316 if (wret)
2317 ret = wret;
2318 }
2319 btrfs_mark_buffer_dirty(right);
2320 return ret;
2321 } else if (extend && slot == 0) {
2322 mid = 1;
2323 } else {
2324 mid = slot;
2325 if (mid != nritems &&
2326 leaf_space_used(l, mid, nritems - mid) +
2327 space_needed > BTRFS_LEAF_DATA_SIZE(root)) {
2328 double_split = 1;
2329 }
2330 }
2331 }
2332 }
2333 nritems = nritems - mid;
2334 btrfs_set_header_nritems(right, nritems);
2335 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2336
2337 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2338 btrfs_item_nr_offset(mid),
2339 nritems * sizeof(struct btrfs_item));
2340
2341 copy_extent_buffer(right, l,
2342 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2343 data_copy_size, btrfs_leaf_data(l) +
2344 leaf_data_end(root, l), data_copy_size);
2345
2346 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2347 btrfs_item_end_nr(l, mid);
2348
2349 for (i = 0; i < nritems; i++) {
2350 struct btrfs_item *item = btrfs_item_nr(right, i);
2351 u32 ioff;
2352
2353 if (!right->map_token) {
2354 map_extent_buffer(right, (unsigned long)item,
2355 sizeof(struct btrfs_item),
2356 &right->map_token, &right->kaddr,
2357 &right->map_start, &right->map_len,
2358 KM_USER1);
2359 }
2360
2361 ioff = btrfs_item_offset(right, item);
2362 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2363 }
2364
2365 if (right->map_token) {
2366 unmap_extent_buffer(right, right->map_token, KM_USER1);
2367 right->map_token = NULL;
2368 }
2369
2370 btrfs_set_header_nritems(l, mid);
2371 ret = 0;
2372 btrfs_item_key(right, &disk_key, 0);
2373 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2374 path->slots[1] + 1, 1);
2375 if (wret)
2376 ret = wret;
2377
2378 btrfs_mark_buffer_dirty(right);
2379 btrfs_mark_buffer_dirty(l);
2380 BUG_ON(path->slots[0] != slot);
2381
2382 if (mid <= slot) {
2383 btrfs_tree_unlock(path->nodes[0]);
2384 free_extent_buffer(path->nodes[0]);
2385 path->nodes[0] = right;
2386 path->slots[0] -= mid;
2387 path->slots[1] += 1;
2388 } else {
2389 btrfs_tree_unlock(right);
2390 free_extent_buffer(right);
2391 }
2392
2393 BUG_ON(path->slots[0] < 0);
2394
2395 if (double_split) {
2396 BUG_ON(num_doubles != 0);
2397 num_doubles++;
2398 goto again;
2399 }
2400 return ret;
2401 }
2402
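/*
 * shrink the item at path->slots[0] to new_size bytes.  If from_end is
 * set the bytes are trimmed from the end of the item data; otherwise they
 * are removed from the front and the item's key offset is moved up by the
 * amount removed.
 */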
2403 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
2404 struct btrfs_root *root,
2405 struct btrfs_path *path,
2406 u32 new_size, int from_end)
2407 {
2408 int ret = 0;
2409 int slot;
2410 int slot_orig;
2411 struct extent_buffer *leaf;
2412 struct btrfs_item *item;
2413 u32 nritems;
2414 unsigned int data_end;
2415 unsigned int old_data_start;
2416 unsigned int old_size;
2417 unsigned int size_diff;
2418 int i;
2419
2420 slot_orig = path->slots[0];
2421 leaf = path->nodes[0];
2422 slot = path->slots[0];
2423
2424 old_size = btrfs_item_size_nr(leaf, slot);
2425 if (old_size == new_size)
2426 return 0;
2427
2428 nritems = btrfs_header_nritems(leaf);
2429 data_end = leaf_data_end(root, leaf);
2430
2431 old_data_start = btrfs_item_offset_nr(leaf, slot);
2432
2433 size_diff = old_size - new_size;
2434
2435 BUG_ON(slot < 0);
2436 BUG_ON(slot >= nritems);
2437
2438 /*
2439 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2440 */
2441 /* first correct the data pointers */
2442 for (i = slot; i < nritems; i++) {
2443 u32 ioff;
2444 item = btrfs_item_nr(leaf, i);
2445
2446 if (!leaf->map_token) {
2447 map_extent_buffer(leaf, (unsigned long)item,
2448 sizeof(struct btrfs_item),
2449 &leaf->map_token, &leaf->kaddr,
2450 &leaf->map_start, &leaf->map_len,
2451 KM_USER1);
2452 }
2453
2454 ioff = btrfs_item_offset(leaf, item);
2455 btrfs_set_item_offset(leaf, item, ioff + size_diff);
2456 }
2457
2458 if (leaf->map_token) {
2459 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2460 leaf->map_token = NULL;
2461 }
2462
2463 /* shift the data */
2464 if (from_end) {
2465 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2466 data_end + size_diff, btrfs_leaf_data(leaf) +
2467 data_end, old_data_start + new_size - data_end);
2468 } else {
2469 struct btrfs_disk_key disk_key;
2470 u64 offset;
2471
2472 btrfs_item_key(leaf, &disk_key, slot);
2473
2474 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
2475 unsigned long ptr;
2476 struct btrfs_file_extent_item *fi;
2477
2478 fi = btrfs_item_ptr(leaf, slot,
2479 struct btrfs_file_extent_item);
2480 fi = (struct btrfs_file_extent_item *)(
2481 (unsigned long)fi - size_diff);
2482
2483 if (btrfs_file_extent_type(leaf, fi) ==
2484 BTRFS_FILE_EXTENT_INLINE) {
2485 ptr = btrfs_item_ptr_offset(leaf, slot);
2486 memmove_extent_buffer(leaf, ptr,
2487 (unsigned long)fi,
2488 offsetof(struct btrfs_file_extent_item,
2489 disk_bytenr));
2490 }
2491 }
2492
2493 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2494 data_end + size_diff, btrfs_leaf_data(leaf) +
2495 data_end, old_data_start - data_end);
2496
2497 offset = btrfs_disk_key_offset(&disk_key);
2498 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
2499 btrfs_set_item_key(leaf, &disk_key, slot);
2500 if (slot == 0)
2501 fixup_low_keys(trans, root, path, &disk_key, 1);
2502 }
2503
2504 item = btrfs_item_nr(leaf, slot);
2505 btrfs_set_item_size(leaf, item, new_size);
2506 btrfs_mark_buffer_dirty(leaf);
2507
2508 ret = 0;
2509 if (btrfs_leaf_free_space(root, leaf) < 0) {
2510 btrfs_print_leaf(root, leaf);
2511 BUG();
2512 }
2513 return ret;
2514 }
2515
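/*
 * grow the item at path->slots[0] by data_size bytes, adding the new
 * space to the end of the item data.  The leaf must already have enough
 * free space or this will BUG().
 */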
2516 int btrfs_extend_item(struct btrfs_trans_handle *trans,
2517 struct btrfs_root *root, struct btrfs_path *path,
2518 u32 data_size)
2519 {
2520 int ret = 0;
2521 int slot;
2522 int slot_orig;
2523 struct extent_buffer *leaf;
2524 struct btrfs_item *item;
2525 u32 nritems;
2526 unsigned int data_end;
2527 unsigned int old_data;
2528 unsigned int old_size;
2529 int i;
2530
2531 slot_orig = path->slots[0];
2532 leaf = path->nodes[0];
2533
2534 nritems = btrfs_header_nritems(leaf);
2535 data_end = leaf_data_end(root, leaf);
2536
2537 if (btrfs_leaf_free_space(root, leaf) < data_size) {
2538 btrfs_print_leaf(root, leaf);
2539 BUG();
2540 }
2541 slot = path->slots[0];
2542 old_data = btrfs_item_end_nr(leaf, slot);
2543
2544 BUG_ON(slot < 0);
2545 if (slot >= nritems) {
2546 btrfs_print_leaf(root, leaf);
2547 printk("slot %d too large, nritems %d\n", slot, nritems);
2548 BUG_ON(1);
2549 }
2550
2551 /*
2552 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2553 */
2554 /* first correct the data pointers */
2555 for (i = slot; i < nritems; i++) {
2556 u32 ioff;
2557 item = btrfs_item_nr(leaf, i);
2558
2559 if (!leaf->map_token) {
2560 map_extent_buffer(leaf, (unsigned long)item,
2561 sizeof(struct btrfs_item),
2562 &leaf->map_token, &leaf->kaddr,
2563 &leaf->map_start, &leaf->map_len,
2564 KM_USER1);
2565 }
2566 ioff = btrfs_item_offset(leaf, item);
2567 btrfs_set_item_offset(leaf, item, ioff - data_size);
2568 }
2569
2570 if (leaf->map_token) {
2571 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2572 leaf->map_token = NULL;
2573 }
2574
2575 /* shift the data */
2576 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2577 data_end - data_size, btrfs_leaf_data(leaf) +
2578 data_end, old_data - data_end);
2579
2580 data_end = old_data;
2581 old_size = btrfs_item_size_nr(leaf, slot);
2582 item = btrfs_item_nr(leaf, slot);
2583 btrfs_set_item_size(leaf, item, old_size + data_size);
2584 btrfs_mark_buffer_dirty(leaf);
2585
2586 ret = 0;
2587 if (btrfs_leaf_free_space(root, leaf) < 0) {
2588 btrfs_print_leaf(root, leaf);
2589 BUG();
2590 }
2591 return ret;
2592 }
2593
2594 /*
2595  * Given a set of keys and item sizes, insert empty items into the tree.
2596  * This does all the path init required, making room in the tree if needed.
2597 */
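/*
 * the items are created with the requested sizes but their data is left
 * uninitialized; callers fill it in afterwards, the way btrfs_insert_item()
 * below does.
 */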
2598 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
2599 struct btrfs_root *root,
2600 struct btrfs_path *path,
2601 struct btrfs_key *cpu_key, u32 *data_size,
2602 int nr)
2603 {
2604 struct extent_buffer *leaf;
2605 struct btrfs_item *item;
2606 int ret = 0;
2607 int slot;
2608 int slot_orig;
2609 int i;
2610 u32 nritems;
2611 u32 total_size = 0;
2612 u32 total_data = 0;
2613 unsigned int data_end;
2614 struct btrfs_disk_key disk_key;
2615
2616 for (i = 0; i < nr; i++) {
2617 total_data += data_size[i];
2618 }
2619
2620 total_size = total_data + (nr - 1) * sizeof(struct btrfs_item);
2621 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
2622 if (ret == 0) {
2623 return -EEXIST;
2624 }
2625 if (ret < 0)
2626 goto out;
2627
2628 slot_orig = path->slots[0];
2629 leaf = path->nodes[0];
2630
2631 nritems = btrfs_header_nritems(leaf);
2632 data_end = leaf_data_end(root, leaf);
2633
2634 if (btrfs_leaf_free_space(root, leaf) <
2635 sizeof(struct btrfs_item) + total_size) {
2636 btrfs_print_leaf(root, leaf);
2637 printk("not enough freespace need %u have %d\n",
2638 total_size, btrfs_leaf_free_space(root, leaf));
2639 BUG();
2640 }
2641
2642 slot = path->slots[0];
2643 BUG_ON(slot < 0);
2644
2645 if (slot != nritems) {
2646 int i;
2647 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
2648
2649 if (old_data < data_end) {
2650 btrfs_print_leaf(root, leaf);
2651 printk("slot %d old_data %d data_end %d\n",
2652 slot, old_data, data_end);
2653 BUG_ON(1);
2654 }
2655 /*
2656 * item0..itemN ... dataN.offset..dataN.size .. data0.size
2657 */
2658 /* first correct the data pointers */
2659 WARN_ON(leaf->map_token);
2660 for (i = slot; i < nritems; i++) {
2661 u32 ioff;
2662
2663 item = btrfs_item_nr(leaf, i);
2664 if (!leaf->map_token) {
2665 map_extent_buffer(leaf, (unsigned long)item,
2666 sizeof(struct btrfs_item),
2667 &leaf->map_token, &leaf->kaddr,
2668 &leaf->map_start, &leaf->map_len,
2669 KM_USER1);
2670 }
2671
2672 ioff = btrfs_item_offset(leaf, item);
2673 btrfs_set_item_offset(leaf, item, ioff - total_data);
2674 }
2675 if (leaf->map_token) {
2676 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2677 leaf->map_token = NULL;
2678 }
2679
2680 /* shift the items */
2681 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
2682 btrfs_item_nr_offset(slot),
2683 (nritems - slot) * sizeof(struct btrfs_item));
2684
2685 /* shift the data */
2686 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2687 data_end - total_data, btrfs_leaf_data(leaf) +
2688 data_end, old_data - data_end);
2689 data_end = old_data;
2690 }
2691
2692 /* setup the item for the new data */
2693 for (i = 0; i < nr; i++) {
2694 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
2695 btrfs_set_item_key(leaf, &disk_key, slot + i);
2696 item = btrfs_item_nr(leaf, slot + i);
2697 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
2698 data_end -= data_size[i];
2699 btrfs_set_item_size(leaf, item, data_size[i]);
2700 }
2701 btrfs_set_header_nritems(leaf, nritems + nr);
2702 btrfs_mark_buffer_dirty(leaf);
2703
2704 ret = 0;
2705 if (slot == 0) {
2706 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
2707 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
2708 }
2709
2710 if (btrfs_leaf_free_space(root, leaf) < 0) {
2711 btrfs_print_leaf(root, leaf);
2712 BUG();
2713 }
2714 out:
2715 return ret;
2716 }
2717
2718 /*
2719 * Given a key and some data, insert an item into the tree.
2720 * This does all the path init required, making room in the tree if needed.
2721 */
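/*
 * a minimal usage sketch (the key type, buffer and length below are only
 * illustrative):
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, &inode_item,
 *				sizeof(inode_item));
 *
 * on success the data has been copied into the new leaf item and the path
 * used internally has already been freed.
 */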
2722 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
2723 *root, struct btrfs_key *cpu_key, void *data, u32
2724 data_size)
2725 {
2726 int ret = 0;
2727 struct btrfs_path *path;
2728 struct extent_buffer *leaf;
2729 unsigned long ptr;
2730
2731 path = btrfs_alloc_path();
2732 BUG_ON(!path);
2733 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
2734 if (!ret) {
2735 leaf = path->nodes[0];
2736 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2737 write_extent_buffer(leaf, data, ptr, data_size);
2738 btrfs_mark_buffer_dirty(leaf);
2739 }
2740 btrfs_free_path(path);
2741 return ret;
2742 }
2743
2744 /*
2745 * delete the pointer from a given node.
2746 *
2747 * If the delete empties a node, the node is removed from the tree,
2748 * continuing all the way the root if required. The root is converted into
2749  * continuing all the way up to the root if required. The root is converted into
2750 */
2751 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2752 struct btrfs_path *path, int level, int slot)
2753 {
2754 struct extent_buffer *parent = path->nodes[level];
2755 u32 nritems;
2756 int ret = 0;
2757 int wret;
2758
2759 nritems = btrfs_header_nritems(parent);
2760 	if (slot != nritems - 1) {
2761 memmove_extent_buffer(parent,
2762 btrfs_node_key_ptr_offset(slot),
2763 btrfs_node_key_ptr_offset(slot + 1),
2764 sizeof(struct btrfs_key_ptr) *
2765 (nritems - slot - 1));
2766 }
2767 nritems--;
2768 btrfs_set_header_nritems(parent, nritems);
2769 if (nritems == 0 && parent == root->node) {
2770 BUG_ON(btrfs_header_level(root->node) != 1);
2771 /* just turn the root into a leaf and break */
2772 btrfs_set_header_level(root->node, 0);
2773 } else if (slot == 0) {
2774 struct btrfs_disk_key disk_key;
2775
2776 btrfs_node_key(parent, &disk_key, 0);
2777 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
2778 if (wret)
2779 ret = wret;
2780 }
2781 btrfs_mark_buffer_dirty(parent);
2782 return ret;
2783 }
2784
2785 /*
2786  * delete nr items, starting at slot, from the leaf in the path.  If that
2787  * empties the leaf, remove it from the tree
2788 */
2789 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2790 struct btrfs_path *path, int slot, int nr)
2791 {
2792 struct extent_buffer *leaf;
2793 struct btrfs_item *item;
2794 int last_off;
2795 int dsize = 0;
2796 int ret = 0;
2797 int wret;
2798 int i;
2799 u32 nritems;
2800
2801 leaf = path->nodes[0];
2802 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
2803
2804 for (i = 0; i < nr; i++)
2805 dsize += btrfs_item_size_nr(leaf, slot + i);
2806
2807 nritems = btrfs_header_nritems(leaf);
2808
2809 if (slot + nr != nritems) {
2810 int i;
2811 int data_end = leaf_data_end(root, leaf);
2812
2813 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
2814 data_end + dsize,
2815 btrfs_leaf_data(leaf) + data_end,
2816 last_off - data_end);
2817
2818 for (i = slot + nr; i < nritems; i++) {
2819 u32 ioff;
2820
2821 item = btrfs_item_nr(leaf, i);
2822 if (!leaf->map_token) {
2823 map_extent_buffer(leaf, (unsigned long)item,
2824 sizeof(struct btrfs_item),
2825 &leaf->map_token, &leaf->kaddr,
2826 &leaf->map_start, &leaf->map_len,
2827 KM_USER1);
2828 }
2829 ioff = btrfs_item_offset(leaf, item);
2830 btrfs_set_item_offset(leaf, item, ioff + dsize);
2831 }
2832
2833 if (leaf->map_token) {
2834 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2835 leaf->map_token = NULL;
2836 }
2837
2838 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
2839 btrfs_item_nr_offset(slot + nr),
2840 sizeof(struct btrfs_item) *
2841 (nritems - slot - nr));
2842 }
2843 btrfs_set_header_nritems(leaf, nritems - nr);
2844 nritems -= nr;
2845
2846 /* delete the leaf if we've emptied it */
2847 if (nritems == 0) {
2848 if (leaf == root->node) {
2849 btrfs_set_header_level(leaf, 0);
2850 } else {
2851 u64 root_gen = btrfs_header_generation(path->nodes[1]);
2852 wret = del_ptr(trans, root, path, 1, path->slots[1]);
2853 if (wret)
2854 ret = wret;
2855 wret = btrfs_free_extent(trans, root,
2856 leaf->start, leaf->len,
2857 btrfs_header_owner(path->nodes[1]),
2858 root_gen, 0, 0, 1);
2859 if (wret)
2860 ret = wret;
2861 }
2862 } else {
2863 int used = leaf_space_used(leaf, 0, nritems);
2864 if (slot == 0) {
2865 struct btrfs_disk_key disk_key;
2866
2867 btrfs_item_key(leaf, &disk_key, 0);
2868 wret = fixup_low_keys(trans, root, path,
2869 &disk_key, 1);
2870 if (wret)
2871 ret = wret;
2872 }
2873
2874 /* delete the leaf if it is mostly empty */
2875 if (used < BTRFS_LEAF_DATA_SIZE(root) / 4) {
2876 /* push_leaf_left fixes the path.
2877 * make sure the path still points to our leaf
2878 * for possible call to del_ptr below
2879 */
2880 slot = path->slots[1];
2881 extent_buffer_get(leaf);
2882
2883 wret = push_leaf_left(trans, root, path, 1, 1);
2884 if (wret < 0 && wret != -ENOSPC)
2885 ret = wret;
2886
2887 if (path->nodes[0] == leaf &&
2888 btrfs_header_nritems(leaf)) {
2889 wret = push_leaf_right(trans, root, path, 1, 1);
2890 if (wret < 0 && wret != -ENOSPC)
2891 ret = wret;
2892 }
2893
2894 if (btrfs_header_nritems(leaf) == 0) {
2895 u64 root_gen;
2896 u64 bytenr = leaf->start;
2897 u32 blocksize = leaf->len;
2898
2899 root_gen = btrfs_header_generation(
2900 path->nodes[1]);
2901
2902 wret = del_ptr(trans, root, path, 1, slot);
2903 if (wret)
2904 ret = wret;
2905
2906 free_extent_buffer(leaf);
2907 wret = btrfs_free_extent(trans, root, bytenr,
2908 blocksize,
2909 btrfs_header_owner(path->nodes[1]),
2910 root_gen, 0, 0, 1);
2911 if (wret)
2912 ret = wret;
2913 } else {
2914 /* if we're still in the path, make sure
2915 * we're dirty. Otherwise, one of the
2916 * push_leaf functions must have already
2917 * dirtied this buffer
2918 */
2919 if (path->nodes[0] == leaf)
2920 btrfs_mark_buffer_dirty(leaf);
2921 free_extent_buffer(leaf);
2922 }
2923 } else {
2924 btrfs_mark_buffer_dirty(leaf);
2925 }
2926 }
2927 return ret;
2928 }
2929
2930 /*
2931 * search the tree again to find a leaf with lesser keys
2932 * returns 0 if it found something or 1 if there are no lesser leaves.
2933 * returns < 0 on io errors.
2934 */
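/*
 * this works by stepping the first key of the current leaf back by one and
 * searching for it again from the root; if the leaf that search lands in
 * starts with a smaller key, it is the previous leaf.
 */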
2935 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
2936 {
2937 struct btrfs_key key;
2938 struct btrfs_disk_key found_key;
2939 int ret;
2940
2941 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
2942
2943 if (key.offset > 0)
2944 key.offset--;
2945 else if (key.type > 0)
2946 key.type--;
2947 else if (key.objectid > 0)
2948 key.objectid--;
2949 else
2950 return 1;
2951
2952 btrfs_release_path(root, path);
2953 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2954 if (ret < 0)
2955 return ret;
2956 btrfs_item_key(path->nodes[0], &found_key, 0);
2957 ret = comp_keys(&found_key, &key);
2958 if (ret < 0)
2959 return 0;
2960 return 1;
2961 }
2962
2963 /*
2964 * search the tree again to find a leaf with greater keys
2965 * returns 0 if it found something or 1 if there are no greater leaves.
2966 * returns < 0 on io errors.
2967 */
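/*
 * the tree is searched again from the root with path->keep_locks set, so
 * btrfs_search_slot() leaves the upper levels locked; the walk over to the
 * next leaf relies on those locks, and unlock_up() releases whatever is no
 * longer needed before returning.
 */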
2968 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
2969 {
2970 int slot;
2971 int level = 1;
2972 struct extent_buffer *c;
2973 struct extent_buffer *next = NULL;
2974 struct btrfs_key key;
2975 u32 nritems;
2976 int ret;
2977
2978 nritems = btrfs_header_nritems(path->nodes[0]);
2979 if (nritems == 0) {
2980 return 1;
2981 }
2982
2983 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
2984
2985 btrfs_release_path(root, path);
2986 path->keep_locks = 1;
2987 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2988 path->keep_locks = 0;
2989
2990 if (ret < 0)
2991 return ret;
2992
2993 nritems = btrfs_header_nritems(path->nodes[0]);
2994 if (nritems > 0 && path->slots[0] < nritems - 1) {
2995 goto done;
2996 }
2997
2998 while(level < BTRFS_MAX_LEVEL) {
2999 if (!path->nodes[level])
3000 return 1;
3001
3002 slot = path->slots[level] + 1;
3003 c = path->nodes[level];
3004 if (slot >= btrfs_header_nritems(c)) {
3005 level++;
3006 if (level == BTRFS_MAX_LEVEL) {
3007 return 1;
3008 }
3009 continue;
3010 }
3011
3012 if (next) {
3013 btrfs_tree_unlock(next);
3014 free_extent_buffer(next);
3015 }
3016
3017 if (level == 1 && path->locks[1] && path->reada)
3018 reada_for_search(root, path, level, slot, 0);
3019
3020 next = read_node_slot(root, c, slot);
3021 WARN_ON(!btrfs_tree_locked(c));
3022 btrfs_tree_lock(next);
3023 break;
3024 }
3025 path->slots[level] = slot;
3026 while(1) {
3027 level--;
3028 c = path->nodes[level];
3029 if (path->locks[level])
3030 btrfs_tree_unlock(c);
3031 free_extent_buffer(c);
3032 path->nodes[level] = next;
3033 path->slots[level] = 0;
3034 path->locks[level] = 1;
3035 if (!level)
3036 break;
3037 if (level == 1 && path->locks[1] && path->reada)
3038 reada_for_search(root, path, level, slot, 0);
3039 next = read_node_slot(root, next, 0);
3040 WARN_ON(!btrfs_tree_locked(path->nodes[level]));
3041 btrfs_tree_lock(next);
3042 }
3043 done:
3044 unlock_up(path, 0, 1);
3045 return 0;
3046 }
3047
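/*
 * walk backwards from the current position looking for an item of the
 * given type with an objectid of at least min_objectid.  returns 0 when
 * one is found, 1 when the search runs out of candidates and < 0 on io
 * errors.
 */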
3048 int btrfs_previous_item(struct btrfs_root *root,
3049 struct btrfs_path *path, u64 min_objectid,
3050 int type)
3051 {
3052 struct btrfs_key found_key;
3053 struct extent_buffer *leaf;
3054 int ret;
3055
3056 while(1) {
3057 if (path->slots[0] == 0) {
3058 ret = btrfs_prev_leaf(root, path);
3059 if (ret != 0)
3060 return ret;
3061 } else {
3062 path->slots[0]--;
3063 }
3064 leaf = path->nodes[0];
3065 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
3066 		if (found_key.type == type)
3067 			return 0;
3068 }
3069 return 1;
3070 }
3071