fs/btrfs/ctree.c (deliverable/linux.git @ 2fb4ab659a0ff72904175a82cc6964d37e0db13f)
1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44
45 struct btrfs_path *btrfs_alloc_path(void)
46 {
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
49 return path;
50 }
51
52 /*
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
55 */
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
57 {
58 int i;
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
61 continue;
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
67 }
68 }
69
70 /*
71 * reset all the locked nodes in the path to spinning locks.
72 *
73 * held is used to keep lockdep happy: when lockdep is enabled, we set
74 * held to a blocking lock before we go around and
75 * retake all the spinlocks in the path. You can safely pass NULL
76 * for held.
77 */
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
80 {
81 int i;
82
83 #ifdef CONFIG_DEBUG_LOCK_ALLOC
84 /* lockdep really cares that we take all of these spinlocks
85 * in the right order. If any of the locks in the path are not
86 * currently blocking, it is going to complain. So, make really
87 * really sure by forcing the path to blocking before we clear
88 * the path blocking.
89 */
90 if (held) {
91 btrfs_set_lock_blocking_rw(held, held_rw);
92 if (held_rw == BTRFS_WRITE_LOCK)
93 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
94 else if (held_rw == BTRFS_READ_LOCK)
95 held_rw = BTRFS_READ_LOCK_BLOCKING;
96 }
97 btrfs_set_path_blocking(p);
98 #endif
99
100 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
101 if (p->nodes[i] && p->locks[i]) {
102 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
103 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
104 p->locks[i] = BTRFS_WRITE_LOCK;
105 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
106 p->locks[i] = BTRFS_READ_LOCK;
107 }
108 }
109
110 #ifdef CONFIG_DEBUG_LOCK_ALLOC
111 if (held)
112 btrfs_clear_lock_blocking_rw(held, held_rw);
113 #endif
114 }
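/*
 * Illustrative sketch (not part of this file): the typical pattern around
 * the two helpers above. Locks in a path start out as spinning locks;
 * before doing anything that might sleep (reading a block from disk,
 * allocating memory), the path is switched to blocking locks and switched
 * back afterwards. read_tree_block() here is just one example of a
 * sleeping operation.
 *
 *	btrfs_set_path_blocking(p);
 *	eb = read_tree_block(root, blocknr, gen);	// may schedule
 *	btrfs_clear_path_blocking(p, NULL, 0);		// back to spinning locks
 */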
115
116 /* this also releases the path */
117 void btrfs_free_path(struct btrfs_path *p)
118 {
119 if (!p)
120 return;
121 btrfs_release_path(p);
122 kmem_cache_free(btrfs_path_cachep, p);
123 }
124
125 /*
126 * path release drops references on the extent buffers in the path
127 * and it drops any locks held by this path
128 *
129 * It is safe to call this on paths that have no locks or extent buffers held.
130 */
131 noinline void btrfs_release_path(struct btrfs_path *p)
132 {
133 int i;
134
135 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
136 p->slots[i] = 0;
137 if (!p->nodes[i])
138 continue;
139 if (p->locks[i]) {
140 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
141 p->locks[i] = 0;
142 }
143 free_extent_buffer(p->nodes[i]);
144 p->nodes[i] = NULL;
145 }
146 }
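/*
 * Illustrative sketch (not part of this file): the usual lifecycle of a
 * btrfs_path as used by callers elsewhere in the tree. The key values are
 * hypothetical; btrfs_search_slot() is assumed to have its usual
 * (trans, root, key, path, ins_len, cow) signature.
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	key.objectid = BTRFS_FIRST_FREE_OBJECTID;	// example objectid
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	// ... read items via path->nodes[0] / path->slots[0] ...
 * out:
 *	btrfs_free_path(path);		// drops locks and references too
 */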
147
148 /*
149 * safely gets a reference on the root node of a tree. A lock
150 * is not taken, so a concurrent writer may put a different node
151 * at the root of the tree. See btrfs_lock_root_node for the
152 * looping required.
153 *
154 * The extent buffer returned by this has a reference taken, so
155 * it won't disappear. It may stop being the root of the tree
156 * at any time because there are no locks held.
157 */
158 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
159 {
160 struct extent_buffer *eb;
161
162 while (1) {
163 rcu_read_lock();
164 eb = rcu_dereference(root->node);
165
166 /*
167 * RCU really hurts here: we could free up the root node because
168 * it was COWed but we may not get the new root node yet, so do
169 * the inc_not_zero dance and if it doesn't work then
170 * synchronize_rcu and try again.
171 */
172 if (atomic_inc_not_zero(&eb->refs)) {
173 rcu_read_unlock();
174 break;
175 }
176 rcu_read_unlock();
177 synchronize_rcu();
178 }
179 return eb;
180 }
181
182 /* loop around taking references on and locking the root node of the
183 * tree until you end up with a lock on the root. A locked buffer
184 * is returned, with a reference held.
185 */
186 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
187 {
188 struct extent_buffer *eb;
189
190 while (1) {
191 eb = btrfs_root_node(root);
192 btrfs_tree_lock(eb);
193 if (eb == root->node)
194 break;
195 btrfs_tree_unlock(eb);
196 free_extent_buffer(eb);
197 }
198 return eb;
199 }
200
201 /* loop around taking references on and locking the root node of the
202 * tree until you end up with a read lock on the root. A read-locked
203 * buffer is returned, with a reference held.
204 */
205 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
206 {
207 struct extent_buffer *eb;
208
209 while (1) {
210 eb = btrfs_root_node(root);
211 btrfs_tree_read_lock(eb);
212 if (eb == root->node)
213 break;
214 btrfs_tree_read_unlock(eb);
215 free_extent_buffer(eb);
216 }
217 return eb;
218 }
219
220 /* cowonly roots (everything not a reference counted cow subvolume) just get
221 * put onto a simple dirty list. transaction.c walks this to make sure they
222 * get properly updated on disk.
223 */
224 static void add_root_to_dirty_list(struct btrfs_root *root)
225 {
226 spin_lock(&root->fs_info->trans_lock);
227 if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
228 list_empty(&root->dirty_list)) {
229 list_add(&root->dirty_list,
230 &root->fs_info->dirty_cowonly_roots);
231 }
232 spin_unlock(&root->fs_info->trans_lock);
233 }
234
235 /*
236 * used by snapshot creation to make a copy of a root for a tree with
237 * a given objectid. The buffer with the new root node is returned in
238 * cow_ret, and this func returns zero on success or a negative error code.
239 */
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 struct btrfs_root *root,
242 struct extent_buffer *buf,
243 struct extent_buffer **cow_ret, u64 new_root_objectid)
244 {
245 struct extent_buffer *cow;
246 int ret = 0;
247 int level;
248 struct btrfs_disk_key disk_key;
249
250 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
251 trans->transid != root->fs_info->running_transaction->transid);
252 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
253 trans->transid != root->last_trans);
254
255 level = btrfs_header_level(buf);
256 if (level == 0)
257 btrfs_item_key(buf, &disk_key, 0);
258 else
259 btrfs_node_key(buf, &disk_key, 0);
260
261 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
262 new_root_objectid, &disk_key, level,
263 buf->start, 0);
264 if (IS_ERR(cow))
265 return PTR_ERR(cow);
266
267 copy_extent_buffer(cow, buf, 0, 0, cow->len);
268 btrfs_set_header_bytenr(cow, cow->start);
269 btrfs_set_header_generation(cow, trans->transid);
270 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
271 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
272 BTRFS_HEADER_FLAG_RELOC);
273 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
274 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
275 else
276 btrfs_set_header_owner(cow, new_root_objectid);
277
278 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
279 BTRFS_FSID_SIZE);
280
281 WARN_ON(btrfs_header_generation(buf) > trans->transid);
282 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
283 ret = btrfs_inc_ref(trans, root, cow, 1);
284 else
285 ret = btrfs_inc_ref(trans, root, cow, 0);
286
287 if (ret)
288 return ret;
289
290 btrfs_mark_buffer_dirty(cow);
291 *cow_ret = cow;
292 return 0;
293 }
294
295 enum mod_log_op {
296 MOD_LOG_KEY_REPLACE,
297 MOD_LOG_KEY_ADD,
298 MOD_LOG_KEY_REMOVE,
299 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
300 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
301 MOD_LOG_MOVE_KEYS,
302 MOD_LOG_ROOT_REPLACE,
303 };
304
305 struct tree_mod_move {
306 int dst_slot;
307 int nr_items;
308 };
309
310 struct tree_mod_root {
311 u64 logical;
312 u8 level;
313 };
314
315 struct tree_mod_elem {
316 struct rb_node node;
317 u64 index; /* shifted logical */
318 u64 seq;
319 enum mod_log_op op;
320
321 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
322 int slot;
323
324 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
325 u64 generation;
326
327 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
328 struct btrfs_disk_key key;
329 u64 blockptr;
330
331 /* this is used for op == MOD_LOG_MOVE_KEYS */
332 struct tree_mod_move move;
333
334 /* this is used for op == MOD_LOG_ROOT_REPLACE */
335 struct tree_mod_root old_root;
336 };
337
338 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
339 {
340 read_lock(&fs_info->tree_mod_log_lock);
341 }
342
343 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
344 {
345 read_unlock(&fs_info->tree_mod_log_lock);
346 }
347
348 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
349 {
350 write_lock(&fs_info->tree_mod_log_lock);
351 }
352
353 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
354 {
355 write_unlock(&fs_info->tree_mod_log_lock);
356 }
357
358 /*
359 * Pull a new tree mod seq number for our operation.
360 */
361 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
362 {
363 return atomic64_inc_return(&fs_info->tree_mod_seq);
364 }
365
366 /*
367 * This adds a new blocker to the tree mod log's blocker list if the @elem
368 * passed does not already have a sequence number set. So when a caller expects
369 * to record tree modifications, it should make sure elem->seq is set to zero
370 * before calling btrfs_get_tree_mod_seq.
371 * Returns a fresh, unused tree log modification sequence number, even if no new
372 * blocker was added.
373 */
374 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
375 struct seq_list *elem)
376 {
377 tree_mod_log_write_lock(fs_info);
378 spin_lock(&fs_info->tree_mod_seq_lock);
379 if (!elem->seq) {
380 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
381 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
382 }
383 spin_unlock(&fs_info->tree_mod_seq_lock);
384 tree_mod_log_write_unlock(fs_info);
385
386 return elem->seq;
387 }
388
389 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
390 struct seq_list *elem)
391 {
392 struct rb_root *tm_root;
393 struct rb_node *node;
394 struct rb_node *next;
395 struct seq_list *cur_elem;
396 struct tree_mod_elem *tm;
397 u64 min_seq = (u64)-1;
398 u64 seq_putting = elem->seq;
399
400 if (!seq_putting)
401 return;
402
403 spin_lock(&fs_info->tree_mod_seq_lock);
404 list_del(&elem->list);
405 elem->seq = 0;
406
407 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
408 if (cur_elem->seq < min_seq) {
409 if (seq_putting > cur_elem->seq) {
410 /*
411 * blocker with lower sequence number exists, we
412 * cannot remove anything from the log
413 */
414 spin_unlock(&fs_info->tree_mod_seq_lock);
415 return;
416 }
417 min_seq = cur_elem->seq;
418 }
419 }
420 spin_unlock(&fs_info->tree_mod_seq_lock);
421
422 /*
423 * anything that's lower than the lowest existing (read: blocked)
424 * sequence number can be removed from the tree.
425 */
426 tree_mod_log_write_lock(fs_info);
427 tm_root = &fs_info->tree_mod_log;
428 for (node = rb_first(tm_root); node; node = next) {
429 next = rb_next(node);
430 tm = container_of(node, struct tree_mod_elem, node);
431 if (tm->seq > min_seq)
432 continue;
433 rb_erase(node, tm_root);
434 kfree(tm);
435 }
436 tree_mod_log_write_unlock(fs_info);
437 }
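/*
 * Illustrative sketch (not part of this file): how a caller typically
 * blocks tree mod log pruning while it needs a consistent view of the
 * tree, e.g. for backref walking. The zero-initialization of the element
 * matters, see the comment above btrfs_get_tree_mod_seq().
 *
 *	struct seq_list elem = {};
 *	u64 seq;
 *
 *	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	// ... search the old tree state at sequence number @seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */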
438
439 /*
440 * key order of the log:
441 * index -> sequence
442 *
443 * the index is the shifted logical of the *new* root node for root replace
444 * operations, or the shifted logical of the affected block for all other
445 * operations.
446 *
447 * Note: must be called with write lock (tree_mod_log_write_lock).
448 */
449 static noinline int
450 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
451 {
452 struct rb_root *tm_root;
453 struct rb_node **new;
454 struct rb_node *parent = NULL;
455 struct tree_mod_elem *cur;
456
457 BUG_ON(!tm);
458
459 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
460
461 tm_root = &fs_info->tree_mod_log;
462 new = &tm_root->rb_node;
463 while (*new) {
464 cur = container_of(*new, struct tree_mod_elem, node);
465 parent = *new;
466 if (cur->index < tm->index)
467 new = &((*new)->rb_left);
468 else if (cur->index > tm->index)
469 new = &((*new)->rb_right);
470 else if (cur->seq < tm->seq)
471 new = &((*new)->rb_left);
472 else if (cur->seq > tm->seq)
473 new = &((*new)->rb_right);
474 else
475 return -EEXIST;
476 }
477
478 rb_link_node(&tm->node, parent, new);
479 rb_insert_color(&tm->node, tm_root);
480 return 0;
481 }
482
483 /*
484 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
485 * returns zero with the tree_mod_log_lock acquired. The caller must hold
486 * the lock until all tree mod log insertions are recorded in the rb tree and
487 * then call tree_mod_log_write_unlock() to release it.
488 */
489 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
490 struct extent_buffer *eb) {
491 smp_mb();
492 if (list_empty(&(fs_info)->tree_mod_seq_list))
493 return 1;
494 if (eb && btrfs_header_level(eb) == 0)
495 return 1;
496
497 tree_mod_log_write_lock(fs_info);
498 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
499 tree_mod_log_write_unlock(fs_info);
500 return 1;
501 }
502
503 return 0;
504 }
505
506 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
507 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
508 struct extent_buffer *eb)
509 {
510 smp_mb();
511 if (list_empty(&(fs_info)->tree_mod_seq_list))
512 return 0;
513 if (eb && btrfs_header_level(eb) == 0)
514 return 0;
515
516 return 1;
517 }
518
519 static struct tree_mod_elem *
520 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
521 enum mod_log_op op, gfp_t flags)
522 {
523 struct tree_mod_elem *tm;
524
525 tm = kzalloc(sizeof(*tm), flags);
526 if (!tm)
527 return NULL;
528
529 tm->index = eb->start >> PAGE_CACHE_SHIFT;
530 if (op != MOD_LOG_KEY_ADD) {
531 btrfs_node_key(eb, &tm->key, slot);
532 tm->blockptr = btrfs_node_blockptr(eb, slot);
533 }
534 tm->op = op;
535 tm->slot = slot;
536 tm->generation = btrfs_node_ptr_generation(eb, slot);
537 RB_CLEAR_NODE(&tm->node);
538
539 return tm;
540 }
541
542 static noinline int
543 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
544 struct extent_buffer *eb, int slot,
545 enum mod_log_op op, gfp_t flags)
546 {
547 struct tree_mod_elem *tm;
548 int ret;
549
550 if (!tree_mod_need_log(fs_info, eb))
551 return 0;
552
553 tm = alloc_tree_mod_elem(eb, slot, op, flags);
554 if (!tm)
555 return -ENOMEM;
556
557 if (tree_mod_dont_log(fs_info, eb)) {
558 kfree(tm);
559 return 0;
560 }
561
562 ret = __tree_mod_log_insert(fs_info, tm);
563 tree_mod_log_write_unlock(fs_info);
564 if (ret)
565 kfree(tm);
566
567 return ret;
568 }
569
570 static noinline int
571 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
572 struct extent_buffer *eb, int dst_slot, int src_slot,
573 int nr_items, gfp_t flags)
574 {
575 struct tree_mod_elem *tm = NULL;
576 struct tree_mod_elem **tm_list = NULL;
577 int ret = 0;
578 int i;
579 int locked = 0;
580
581 if (!tree_mod_need_log(fs_info, eb))
582 return 0;
583
584 tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
585 if (!tm_list)
586 return -ENOMEM;
587
588 tm = kzalloc(sizeof(*tm), flags);
589 if (!tm) {
590 ret = -ENOMEM;
591 goto free_tms;
592 }
593
594 tm->index = eb->start >> PAGE_CACHE_SHIFT;
595 tm->slot = src_slot;
596 tm->move.dst_slot = dst_slot;
597 tm->move.nr_items = nr_items;
598 tm->op = MOD_LOG_MOVE_KEYS;
599
600 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
601 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
602 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
603 if (!tm_list[i]) {
604 ret = -ENOMEM;
605 goto free_tms;
606 }
607 }
608
609 if (tree_mod_dont_log(fs_info, eb))
610 goto free_tms;
611 locked = 1;
612
613 /*
614 * When we overwrite something during the move, we log these removals.
615 * This can only happen when we move towards the beginning of the
616 * buffer, i.e. dst_slot < src_slot.
617 */
618 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
619 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
620 if (ret)
621 goto free_tms;
622 }
623
624 ret = __tree_mod_log_insert(fs_info, tm);
625 if (ret)
626 goto free_tms;
627 tree_mod_log_write_unlock(fs_info);
628 kfree(tm_list);
629
630 return 0;
631 free_tms:
632 for (i = 0; i < nr_items; i++) {
633 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
634 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
635 kfree(tm_list[i]);
636 }
637 if (locked)
638 tree_mod_log_write_unlock(fs_info);
639 kfree(tm_list);
640 kfree(tm);
641
642 return ret;
643 }
644
645 static inline int
646 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
647 struct tree_mod_elem **tm_list,
648 int nritems)
649 {
650 int i, j;
651 int ret;
652
653 for (i = nritems - 1; i >= 0; i--) {
654 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
655 if (ret) {
656 for (j = nritems - 1; j > i; j--)
657 rb_erase(&tm_list[j]->node,
658 &fs_info->tree_mod_log);
659 return ret;
660 }
661 }
662
663 return 0;
664 }
665
666 static noinline int
667 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
668 struct extent_buffer *old_root,
669 struct extent_buffer *new_root, gfp_t flags,
670 int log_removal)
671 {
672 struct tree_mod_elem *tm = NULL;
673 struct tree_mod_elem **tm_list = NULL;
674 int nritems = 0;
675 int ret = 0;
676 int i;
677
678 if (!tree_mod_need_log(fs_info, NULL))
679 return 0;
680
681 if (log_removal && btrfs_header_level(old_root) > 0) {
682 nritems = btrfs_header_nritems(old_root);
683 tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
684 flags);
685 if (!tm_list) {
686 ret = -ENOMEM;
687 goto free_tms;
688 }
689 for (i = 0; i < nritems; i++) {
690 tm_list[i] = alloc_tree_mod_elem(old_root, i,
691 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
692 if (!tm_list[i]) {
693 ret = -ENOMEM;
694 goto free_tms;
695 }
696 }
697 }
698
699 tm = kzalloc(sizeof(*tm), flags);
700 if (!tm) {
701 ret = -ENOMEM;
702 goto free_tms;
703 }
704
705 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
706 tm->old_root.logical = old_root->start;
707 tm->old_root.level = btrfs_header_level(old_root);
708 tm->generation = btrfs_header_generation(old_root);
709 tm->op = MOD_LOG_ROOT_REPLACE;
710
711 if (tree_mod_dont_log(fs_info, NULL))
712 goto free_tms;
713
714 if (tm_list)
715 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
716 if (!ret)
717 ret = __tree_mod_log_insert(fs_info, tm);
718
719 tree_mod_log_write_unlock(fs_info);
720 if (ret)
721 goto free_tms;
722 kfree(tm_list);
723
724 return ret;
725
726 free_tms:
727 if (tm_list) {
728 for (i = 0; i < nritems; i++)
729 kfree(tm_list[i]);
730 kfree(tm_list);
731 }
732 kfree(tm);
733
734 return ret;
735 }
736
737 static struct tree_mod_elem *
738 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
739 int smallest)
740 {
741 struct rb_root *tm_root;
742 struct rb_node *node;
743 struct tree_mod_elem *cur = NULL;
744 struct tree_mod_elem *found = NULL;
745 u64 index = start >> PAGE_CACHE_SHIFT;
746
747 tree_mod_log_read_lock(fs_info);
748 tm_root = &fs_info->tree_mod_log;
749 node = tm_root->rb_node;
750 while (node) {
751 cur = container_of(node, struct tree_mod_elem, node);
752 if (cur->index < index) {
753 node = node->rb_left;
754 } else if (cur->index > index) {
755 node = node->rb_right;
756 } else if (cur->seq < min_seq) {
757 node = node->rb_left;
758 } else if (!smallest) {
759 /* we want the node with the highest seq */
760 if (found)
761 BUG_ON(found->seq > cur->seq);
762 found = cur;
763 node = node->rb_left;
764 } else if (cur->seq > min_seq) {
765 /* we want the node with the smallest seq */
766 if (found)
767 BUG_ON(found->seq < cur->seq);
768 found = cur;
769 node = node->rb_right;
770 } else {
771 found = cur;
772 break;
773 }
774 }
775 tree_mod_log_read_unlock(fs_info);
776
777 return found;
778 }
779
780 /*
781 * this returns the element from the log with the smallest time sequence
782 * value that's in the log (the oldest log item). any element with a time
783 * sequence lower than min_seq will be ignored.
784 */
785 static struct tree_mod_elem *
786 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
787 u64 min_seq)
788 {
789 return __tree_mod_log_search(fs_info, start, min_seq, 1);
790 }
791
792 /*
793 * this returns the element from the log with the largest time sequence
794 * value that's in the log (the most recent log item). any element with
795 * a time sequence lower than min_seq will be ignored.
796 */
797 static struct tree_mod_elem *
798 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
799 {
800 return __tree_mod_log_search(fs_info, start, min_seq, 0);
801 }
802
803 static noinline int
804 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
805 struct extent_buffer *src, unsigned long dst_offset,
806 unsigned long src_offset, int nr_items)
807 {
808 int ret = 0;
809 struct tree_mod_elem **tm_list = NULL;
810 struct tree_mod_elem **tm_list_add, **tm_list_rem;
811 int i;
812 int locked = 0;
813
814 if (!tree_mod_need_log(fs_info, NULL))
815 return 0;
816
817 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
818 return 0;
819
820 tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
821 GFP_NOFS);
822 if (!tm_list)
823 return -ENOMEM;
824
825 tm_list_add = tm_list;
826 tm_list_rem = tm_list + nr_items;
827 for (i = 0; i < nr_items; i++) {
828 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
829 MOD_LOG_KEY_REMOVE, GFP_NOFS);
830 if (!tm_list_rem[i]) {
831 ret = -ENOMEM;
832 goto free_tms;
833 }
834
835 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
836 MOD_LOG_KEY_ADD, GFP_NOFS);
837 if (!tm_list_add[i]) {
838 ret = -ENOMEM;
839 goto free_tms;
840 }
841 }
842
843 if (tree_mod_dont_log(fs_info, NULL))
844 goto free_tms;
845 locked = 1;
846
847 for (i = 0; i < nr_items; i++) {
848 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
849 if (ret)
850 goto free_tms;
851 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
852 if (ret)
853 goto free_tms;
854 }
855
856 tree_mod_log_write_unlock(fs_info);
857 kfree(tm_list);
858
859 return 0;
860
861 free_tms:
862 for (i = 0; i < nr_items * 2; i++) {
863 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
864 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
865 kfree(tm_list[i]);
866 }
867 if (locked)
868 tree_mod_log_write_unlock(fs_info);
869 kfree(tm_list);
870
871 return ret;
872 }
873
874 static inline void
875 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
876 int dst_offset, int src_offset, int nr_items)
877 {
878 int ret;
879 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
880 nr_items, GFP_NOFS);
881 BUG_ON(ret < 0);
882 }
883
884 static noinline void
885 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
886 struct extent_buffer *eb, int slot, int atomic)
887 {
888 int ret;
889
890 ret = tree_mod_log_insert_key(fs_info, eb, slot,
891 MOD_LOG_KEY_REPLACE,
892 atomic ? GFP_ATOMIC : GFP_NOFS);
893 BUG_ON(ret < 0);
894 }
895
896 static noinline int
897 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
898 {
899 struct tree_mod_elem **tm_list = NULL;
900 int nritems = 0;
901 int i;
902 int ret = 0;
903
904 if (btrfs_header_level(eb) == 0)
905 return 0;
906
907 if (!tree_mod_need_log(fs_info, NULL))
908 return 0;
909
910 nritems = btrfs_header_nritems(eb);
911 tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
912 GFP_NOFS);
913 if (!tm_list)
914 return -ENOMEM;
915
916 for (i = 0; i < nritems; i++) {
917 tm_list[i] = alloc_tree_mod_elem(eb, i,
918 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
919 if (!tm_list[i]) {
920 ret = -ENOMEM;
921 goto free_tms;
922 }
923 }
924
925 if (tree_mod_dont_log(fs_info, eb))
926 goto free_tms;
927
928 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
929 tree_mod_log_write_unlock(fs_info);
930 if (ret)
931 goto free_tms;
932 kfree(tm_list);
933
934 return 0;
935
936 free_tms:
937 for (i = 0; i < nritems; i++)
938 kfree(tm_list[i]);
939 kfree(tm_list);
940
941 return ret;
942 }
943
944 static noinline void
945 tree_mod_log_set_root_pointer(struct btrfs_root *root,
946 struct extent_buffer *new_root_node,
947 int log_removal)
948 {
949 int ret;
950 ret = tree_mod_log_insert_root(root->fs_info, root->node,
951 new_root_node, GFP_NOFS, log_removal);
952 BUG_ON(ret < 0);
953 }
954
955 /*
956 * check if the tree block can be shared by multiple trees
957 */
958 int btrfs_block_can_be_shared(struct btrfs_root *root,
959 struct extent_buffer *buf)
960 {
961 /*
962 * Tree blocks not in reference counted trees and tree roots
963 * are never shared. If a block was allocated after the last
964 * snapshot and the block was not allocated by tree relocation,
965 * we know the block is not shared.
966 */
967 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
968 buf != root->node && buf != root->commit_root &&
969 (btrfs_header_generation(buf) <=
970 btrfs_root_last_snapshot(&root->root_item) ||
971 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
972 return 1;
973 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
974 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
975 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
976 return 1;
977 #endif
978 return 0;
979 }
980
981 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
982 struct btrfs_root *root,
983 struct extent_buffer *buf,
984 struct extent_buffer *cow,
985 int *last_ref)
986 {
987 u64 refs;
988 u64 owner;
989 u64 flags;
990 u64 new_flags = 0;
991 int ret;
992
993 /*
994 * Backrefs update rules:
995 *
996 * Always use full backrefs for extent pointers in tree block
997 * allocated by tree relocation.
998 *
999 * If a shared tree block is no longer referenced by its owner
1000 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
1001 * use full backrefs for extent pointers in tree block.
1002 *
1003 * If a tree block is being relocated
1004 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1005 * use full backrefs for extent pointers in tree block.
1006 * The reason for this is that some operations (such as drop tree)
1007 * are only allowed for blocks that use full backrefs.
1008 */
1009
1010 if (btrfs_block_can_be_shared(root, buf)) {
1011 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1012 btrfs_header_level(buf), 1,
1013 &refs, &flags);
1014 if (ret)
1015 return ret;
1016 if (refs == 0) {
1017 ret = -EROFS;
1018 btrfs_std_error(root->fs_info, ret);
1019 return ret;
1020 }
1021 } else {
1022 refs = 1;
1023 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1024 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1025 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1026 else
1027 flags = 0;
1028 }
1029
1030 owner = btrfs_header_owner(buf);
1031 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1032 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1033
1034 if (refs > 1) {
1035 if ((owner == root->root_key.objectid ||
1036 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1037 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1038 ret = btrfs_inc_ref(trans, root, buf, 1);
1039 BUG_ON(ret); /* -ENOMEM */
1040
1041 if (root->root_key.objectid ==
1042 BTRFS_TREE_RELOC_OBJECTID) {
1043 ret = btrfs_dec_ref(trans, root, buf, 0);
1044 BUG_ON(ret); /* -ENOMEM */
1045 ret = btrfs_inc_ref(trans, root, cow, 1);
1046 BUG_ON(ret); /* -ENOMEM */
1047 }
1048 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1049 } else {
1050
1051 if (root->root_key.objectid ==
1052 BTRFS_TREE_RELOC_OBJECTID)
1053 ret = btrfs_inc_ref(trans, root, cow, 1);
1054 else
1055 ret = btrfs_inc_ref(trans, root, cow, 0);
1056 BUG_ON(ret); /* -ENOMEM */
1057 }
1058 if (new_flags != 0) {
1059 int level = btrfs_header_level(buf);
1060
1061 ret = btrfs_set_disk_extent_flags(trans, root,
1062 buf->start,
1063 buf->len,
1064 new_flags, level, 0);
1065 if (ret)
1066 return ret;
1067 }
1068 } else {
1069 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1070 if (root->root_key.objectid ==
1071 BTRFS_TREE_RELOC_OBJECTID)
1072 ret = btrfs_inc_ref(trans, root, cow, 1);
1073 else
1074 ret = btrfs_inc_ref(trans, root, cow, 0);
1075 BUG_ON(ret); /* -ENOMEM */
1076 ret = btrfs_dec_ref(trans, root, buf, 1);
1077 BUG_ON(ret); /* -ENOMEM */
1078 }
1079 clean_tree_block(trans, root, buf);
1080 *last_ref = 1;
1081 }
1082 return 0;
1083 }
1084
1085 /*
1086 * does the dirty work in cow of a single block. The parent block (if
1087 * supplied) is updated to point to the new cow copy. The new buffer is marked
1088 * dirty and returned locked. If you modify the block it needs to be marked
1089 * dirty again.
1090 *
1091 * search_start -- an allocation hint for the new block
1092 *
1093 * empty_size -- a hint that you plan on doing more cow. This is the size in
1094 * bytes the allocator should try to find free next to the block it returns.
1095 * This is just a hint and may be ignored by the allocator.
1096 */
1097 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1098 struct btrfs_root *root,
1099 struct extent_buffer *buf,
1100 struct extent_buffer *parent, int parent_slot,
1101 struct extent_buffer **cow_ret,
1102 u64 search_start, u64 empty_size)
1103 {
1104 struct btrfs_disk_key disk_key;
1105 struct extent_buffer *cow;
1106 int level, ret;
1107 int last_ref = 0;
1108 int unlock_orig = 0;
1109 u64 parent_start;
1110
1111 if (*cow_ret == buf)
1112 unlock_orig = 1;
1113
1114 btrfs_assert_tree_locked(buf);
1115
1116 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1117 trans->transid != root->fs_info->running_transaction->transid);
1118 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1119 trans->transid != root->last_trans);
1120
1121 level = btrfs_header_level(buf);
1122
1123 if (level == 0)
1124 btrfs_item_key(buf, &disk_key, 0);
1125 else
1126 btrfs_node_key(buf, &disk_key, 0);
1127
1128 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1129 if (parent)
1130 parent_start = parent->start;
1131 else
1132 parent_start = 0;
1133 } else
1134 parent_start = 0;
1135
1136 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
1137 root->root_key.objectid, &disk_key,
1138 level, search_start, empty_size);
1139 if (IS_ERR(cow))
1140 return PTR_ERR(cow);
1141
1142 /* cow is set to blocking by btrfs_init_new_buffer */
1143
1144 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1145 btrfs_set_header_bytenr(cow, cow->start);
1146 btrfs_set_header_generation(cow, trans->transid);
1147 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1148 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1149 BTRFS_HEADER_FLAG_RELOC);
1150 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1151 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1152 else
1153 btrfs_set_header_owner(cow, root->root_key.objectid);
1154
1155 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1156 BTRFS_FSID_SIZE);
1157
1158 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1159 if (ret) {
1160 btrfs_abort_transaction(trans, root, ret);
1161 return ret;
1162 }
1163
1164 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1165 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1166 if (ret)
1167 return ret;
1168 }
1169
1170 if (buf == root->node) {
1171 WARN_ON(parent && parent != buf);
1172 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1173 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1174 parent_start = buf->start;
1175 else
1176 parent_start = 0;
1177
1178 extent_buffer_get(cow);
1179 tree_mod_log_set_root_pointer(root, cow, 1);
1180 rcu_assign_pointer(root->node, cow);
1181
1182 btrfs_free_tree_block(trans, root, buf, parent_start,
1183 last_ref);
1184 free_extent_buffer(buf);
1185 add_root_to_dirty_list(root);
1186 } else {
1187 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1188 parent_start = parent->start;
1189 else
1190 parent_start = 0;
1191
1192 WARN_ON(trans->transid != btrfs_header_generation(parent));
1193 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1194 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1195 btrfs_set_node_blockptr(parent, parent_slot,
1196 cow->start);
1197 btrfs_set_node_ptr_generation(parent, parent_slot,
1198 trans->transid);
1199 btrfs_mark_buffer_dirty(parent);
1200 if (last_ref) {
1201 ret = tree_mod_log_free_eb(root->fs_info, buf);
1202 if (ret) {
1203 btrfs_abort_transaction(trans, root, ret);
1204 return ret;
1205 }
1206 }
1207 btrfs_free_tree_block(trans, root, buf, parent_start,
1208 last_ref);
1209 }
1210 if (unlock_orig)
1211 btrfs_tree_unlock(buf);
1212 free_extent_buffer_stale(buf);
1213 btrfs_mark_buffer_dirty(cow);
1214 *cow_ret = cow;
1215 return 0;
1216 }
1217
1218 /*
1219 * returns the logical address of the oldest predecessor of the given root.
1220 * entries older than time_seq are ignored.
1221 */
1222 static struct tree_mod_elem *
1223 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1224 struct extent_buffer *eb_root, u64 time_seq)
1225 {
1226 struct tree_mod_elem *tm;
1227 struct tree_mod_elem *found = NULL;
1228 u64 root_logical = eb_root->start;
1229 int looped = 0;
1230
1231 if (!time_seq)
1232 return NULL;
1233
1234 /*
1235 * the very last operation that's logged for a root is the replacement
1236 * operation (if it is replaced at all). this has the index of the *new*
1237 * root, making it the very first operation that's logged for this root.
1238 */
1239 while (1) {
1240 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1241 time_seq);
1242 if (!looped && !tm)
1243 return NULL;
1244 /*
1245 * if there are no tree operations for the oldest root, we simply
1246 * return it. this should only happen if that (old) root is at
1247 * level 0.
1248 */
1249 if (!tm)
1250 break;
1251
1252 /*
1253 * if there's an operation that's not a root replacement, we
1254 * found the oldest version of our root. normally, we'll find a
1255 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1256 */
1257 if (tm->op != MOD_LOG_ROOT_REPLACE)
1258 break;
1259
1260 found = tm;
1261 root_logical = tm->old_root.logical;
1262 looped = 1;
1263 }
1264
1265 /* if there's no old root to return, return what we found instead */
1266 if (!found)
1267 found = tm;
1268
1269 return found;
1270 }
1271
1272 /*
1273 * tm is a pointer to the first operation to rewind within eb. then, all
1274 * previous operations will be rewound (until we reach something older than
1275 * time_seq).
1276 */
1277 static void
1278 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1279 u64 time_seq, struct tree_mod_elem *first_tm)
1280 {
1281 u32 n;
1282 struct rb_node *next;
1283 struct tree_mod_elem *tm = first_tm;
1284 unsigned long o_dst;
1285 unsigned long o_src;
1286 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1287
1288 n = btrfs_header_nritems(eb);
1289 tree_mod_log_read_lock(fs_info);
1290 while (tm && tm->seq >= time_seq) {
1291 /*
1292 * all the operations are recorded with the operator used for
1293 * the modification. as we're going backwards, we do the
1294 * opposite of each operation here.
1295 */
1296 switch (tm->op) {
1297 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1298 BUG_ON(tm->slot < n);
1299 /* Fallthrough */
1300 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1301 case MOD_LOG_KEY_REMOVE:
1302 btrfs_set_node_key(eb, &tm->key, tm->slot);
1303 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1304 btrfs_set_node_ptr_generation(eb, tm->slot,
1305 tm->generation);
1306 n++;
1307 break;
1308 case MOD_LOG_KEY_REPLACE:
1309 BUG_ON(tm->slot >= n);
1310 btrfs_set_node_key(eb, &tm->key, tm->slot);
1311 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1312 btrfs_set_node_ptr_generation(eb, tm->slot,
1313 tm->generation);
1314 break;
1315 case MOD_LOG_KEY_ADD:
1316 /* if a move operation is needed it's in the log */
1317 n--;
1318 break;
1319 case MOD_LOG_MOVE_KEYS:
1320 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1321 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1322 memmove_extent_buffer(eb, o_dst, o_src,
1323 tm->move.nr_items * p_size);
1324 break;
1325 case MOD_LOG_ROOT_REPLACE:
1326 /*
1327 * this operation is special. for roots, this must be
1328 * handled explicitly before rewinding.
1329 * for non-roots, this operation may exist if the node
1330 * was a root: root A -> child B; then A gets empty and
1331 * B is promoted to the new root. in the mod log, we'll
1332 * have a root-replace operation for B, a tree block
1333 * that is not a root. we simply ignore that operation.
1334 */
1335 break;
1336 }
1337 next = rb_next(&tm->node);
1338 if (!next)
1339 break;
1340 tm = container_of(next, struct tree_mod_elem, node);
1341 if (tm->index != first_tm->index)
1342 break;
1343 }
1344 tree_mod_log_read_unlock(fs_info);
1345 btrfs_set_header_nritems(eb, n);
1346 }
1347
1348 /*
1349 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1350 * is returned. If rewind operations happen, a fresh buffer is returned. The
1351 * returned buffer is always read-locked. If the returned buffer is not the
1352 * input buffer, the lock on the input buffer is released and the input buffer
1353 * is freed (its refcount is decremented).
1354 */
1355 static struct extent_buffer *
1356 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1357 struct extent_buffer *eb, u64 time_seq)
1358 {
1359 struct extent_buffer *eb_rewin;
1360 struct tree_mod_elem *tm;
1361
1362 if (!time_seq)
1363 return eb;
1364
1365 if (btrfs_header_level(eb) == 0)
1366 return eb;
1367
1368 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1369 if (!tm)
1370 return eb;
1371
1372 btrfs_set_path_blocking(path);
1373 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1374
1375 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1376 BUG_ON(tm->slot != 0);
1377 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1378 fs_info->tree_root->nodesize);
1379 if (!eb_rewin) {
1380 btrfs_tree_read_unlock_blocking(eb);
1381 free_extent_buffer(eb);
1382 return NULL;
1383 }
1384 btrfs_set_header_bytenr(eb_rewin, eb->start);
1385 btrfs_set_header_backref_rev(eb_rewin,
1386 btrfs_header_backref_rev(eb));
1387 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1388 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1389 } else {
1390 eb_rewin = btrfs_clone_extent_buffer(eb);
1391 if (!eb_rewin) {
1392 btrfs_tree_read_unlock_blocking(eb);
1393 free_extent_buffer(eb);
1394 return NULL;
1395 }
1396 }
1397
1398 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1399 btrfs_tree_read_unlock_blocking(eb);
1400 free_extent_buffer(eb);
1401
1402 extent_buffer_get(eb_rewin);
1403 btrfs_tree_read_lock(eb_rewin);
1404 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1405 WARN_ON(btrfs_header_nritems(eb_rewin) >
1406 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1407
1408 return eb_rewin;
1409 }
1410
1411 /*
1412 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1413 * value. If there are no changes, the current root->root_node is returned. If
1414 * anything changed in between, there's a fresh buffer allocated on which the
1415 * rewind operations are done. In any case, the returned buffer is read locked.
1416 * Returns NULL on error (with no locks held).
1417 */
1418 static inline struct extent_buffer *
1419 get_old_root(struct btrfs_root *root, u64 time_seq)
1420 {
1421 struct tree_mod_elem *tm;
1422 struct extent_buffer *eb = NULL;
1423 struct extent_buffer *eb_root;
1424 struct extent_buffer *old;
1425 struct tree_mod_root *old_root = NULL;
1426 u64 old_generation = 0;
1427 u64 logical;
1428
1429 eb_root = btrfs_read_lock_root_node(root);
1430 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1431 if (!tm)
1432 return eb_root;
1433
1434 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1435 old_root = &tm->old_root;
1436 old_generation = tm->generation;
1437 logical = old_root->logical;
1438 } else {
1439 logical = eb_root->start;
1440 }
1441
1442 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1443 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1444 btrfs_tree_read_unlock(eb_root);
1445 free_extent_buffer(eb_root);
1446 old = read_tree_block(root, logical, 0);
1447 if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
1448 free_extent_buffer(old);
1449 btrfs_warn(root->fs_info,
1450 "failed to read tree block %llu from get_old_root", logical);
1451 } else {
1452 eb = btrfs_clone_extent_buffer(old);
1453 free_extent_buffer(old);
1454 }
1455 } else if (old_root) {
1456 btrfs_tree_read_unlock(eb_root);
1457 free_extent_buffer(eb_root);
1458 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1459 } else {
1460 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1461 eb = btrfs_clone_extent_buffer(eb_root);
1462 btrfs_tree_read_unlock_blocking(eb_root);
1463 free_extent_buffer(eb_root);
1464 }
1465
1466 if (!eb)
1467 return NULL;
1468 extent_buffer_get(eb);
1469 btrfs_tree_read_lock(eb);
1470 if (old_root) {
1471 btrfs_set_header_bytenr(eb, eb->start);
1472 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1473 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1474 btrfs_set_header_level(eb, old_root->level);
1475 btrfs_set_header_generation(eb, old_generation);
1476 }
1477 if (tm)
1478 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1479 else
1480 WARN_ON(btrfs_header_level(eb) != 0);
1481 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1482
1483 return eb;
1484 }
1485
1486 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1487 {
1488 struct tree_mod_elem *tm;
1489 int level;
1490 struct extent_buffer *eb_root = btrfs_root_node(root);
1491
1492 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1493 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1494 level = tm->old_root.level;
1495 } else {
1496 level = btrfs_header_level(eb_root);
1497 }
1498 free_extent_buffer(eb_root);
1499
1500 return level;
1501 }
1502
1503 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1504 struct btrfs_root *root,
1505 struct extent_buffer *buf)
1506 {
1507 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1508 if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
1509 return 0;
1510 #endif
1511 /* ensure we can see the force_cow */
1512 smp_rmb();
1513
1514 /*
1515 * We do not need to cow a block if
1516 * 1) this block is not created or changed in this transaction;
1517 * 2) this block does not belong to TREE_RELOC tree;
1518 * 3) the root is not forced COW.
1519 *
1520 * What is forced COW:
1521 * when we create a snapshot during committing the transaction,
1522 * after we've finished copying the src root, we must COW the shared
1523 * block to ensure metadata consistency.
1524 */
1525 if (btrfs_header_generation(buf) == trans->transid &&
1526 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1527 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1528 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1529 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1530 return 0;
1531 return 1;
1532 }
1533
1534 /*
1535 * cows a single block, see __btrfs_cow_block for the real work.
1536 * This version of it has extra checks so that a block isn't cow'd more than
1537 * once per transaction, as long as it hasn't been written yet
1538 */
1539 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1540 struct btrfs_root *root, struct extent_buffer *buf,
1541 struct extent_buffer *parent, int parent_slot,
1542 struct extent_buffer **cow_ret)
1543 {
1544 u64 search_start;
1545 int ret;
1546
1547 if (trans->transaction != root->fs_info->running_transaction)
1548 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1549 trans->transid,
1550 root->fs_info->running_transaction->transid);
1551
1552 if (trans->transid != root->fs_info->generation)
1553 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1554 trans->transid, root->fs_info->generation);
1555
1556 if (!should_cow_block(trans, root, buf)) {
1557 *cow_ret = buf;
1558 return 0;
1559 }
1560
1561 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1562
1563 if (parent)
1564 btrfs_set_lock_blocking(parent);
1565 btrfs_set_lock_blocking(buf);
1566
1567 ret = __btrfs_cow_block(trans, root, buf, parent,
1568 parent_slot, cow_ret, search_start, 0);
1569
1570 trace_btrfs_cow_block(root, buf, *cow_ret);
1571
1572 return ret;
1573 }
1574
1575 /*
1576 * helper function for defrag to decide if two blocks pointed to by a
1577 * node are actually close by
1578 */
1579 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1580 {
1581 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1582 return 1;
1583 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1584 return 1;
1585 return 0;
1586 }
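/*
 * Illustrative numbers (not part of this file), assuming a 16K blocksize:
 * close_blocks(1048576, 1081344, 16384) sees a gap of
 * 1081344 - (1048576 + 16384) = 16384 < 32768 and returns 1 (close),
 * while close_blocks(1048576, 1114112, 16384) sees a gap of 49152 and
 * returns 0.
 */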
1587
1588 /*
1589 * compare two keys in a memcmp fashion
1590 */
1591 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1592 {
1593 struct btrfs_key k1;
1594
1595 btrfs_disk_key_to_cpu(&k1, disk);
1596
1597 return btrfs_comp_cpu_keys(&k1, k2);
1598 }
1599
1600 /*
1601 * same as comp_keys only with two btrfs_key's
1602 */
1603 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1604 {
1605 if (k1->objectid > k2->objectid)
1606 return 1;
1607 if (k1->objectid < k2->objectid)
1608 return -1;
1609 if (k1->type > k2->type)
1610 return 1;
1611 if (k1->type < k2->type)
1612 return -1;
1613 if (k1->offset > k2->offset)
1614 return 1;
1615 if (k1->offset < k2->offset)
1616 return -1;
1617 return 0;
1618 }
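/*
 * Worked example (not part of this file): keys sort by objectid, then
 * type, then offset. With
 *	k1 = { .objectid = 256, .type = 1, .offset = 0 }
 *	k2 = { .objectid = 256, .type = 1, .offset = 4096 }
 * btrfs_comp_cpu_keys(&k1, &k2) returns -1 because only the offsets
 * differ and k1->offset < k2->offset.
 */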
1619
1620 /*
1621 * this is used by the defrag code to go through all the
1622 * leaves pointed to by a node and reallocate them so that
1623 * disk order is close to key order
1624 */
1625 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1626 struct btrfs_root *root, struct extent_buffer *parent,
1627 int start_slot, u64 *last_ret,
1628 struct btrfs_key *progress)
1629 {
1630 struct extent_buffer *cur;
1631 u64 blocknr;
1632 u64 gen;
1633 u64 search_start = *last_ret;
1634 u64 last_block = 0;
1635 u64 other;
1636 u32 parent_nritems;
1637 int end_slot;
1638 int i;
1639 int err = 0;
1640 int parent_level;
1641 int uptodate;
1642 u32 blocksize;
1643 int progress_passed = 0;
1644 struct btrfs_disk_key disk_key;
1645
1646 parent_level = btrfs_header_level(parent);
1647
1648 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1649 WARN_ON(trans->transid != root->fs_info->generation);
1650
1651 parent_nritems = btrfs_header_nritems(parent);
1652 blocksize = root->nodesize;
1653 end_slot = parent_nritems;
1654
1655 if (parent_nritems == 1)
1656 return 0;
1657
1658 btrfs_set_lock_blocking(parent);
1659
1660 for (i = start_slot; i < end_slot; i++) {
1661 int close = 1;
1662
1663 btrfs_node_key(parent, &disk_key, i);
1664 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1665 continue;
1666
1667 progress_passed = 1;
1668 blocknr = btrfs_node_blockptr(parent, i);
1669 gen = btrfs_node_ptr_generation(parent, i);
1670 if (last_block == 0)
1671 last_block = blocknr;
1672
1673 if (i > 0) {
1674 other = btrfs_node_blockptr(parent, i - 1);
1675 close = close_blocks(blocknr, other, blocksize);
1676 }
1677 if (!close && i < end_slot - 2) {
1678 other = btrfs_node_blockptr(parent, i + 1);
1679 close = close_blocks(blocknr, other, blocksize);
1680 }
1681 if (close) {
1682 last_block = blocknr;
1683 continue;
1684 }
1685
1686 cur = btrfs_find_tree_block(root, blocknr);
1687 if (cur)
1688 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1689 else
1690 uptodate = 0;
1691 if (!cur || !uptodate) {
1692 if (!cur) {
1693 cur = read_tree_block(root, blocknr, gen);
1694 if (!cur || !extent_buffer_uptodate(cur)) {
1695 free_extent_buffer(cur);
1696 return -EIO;
1697 }
1698 } else if (!uptodate) {
1699 err = btrfs_read_buffer(cur, gen);
1700 if (err) {
1701 free_extent_buffer(cur);
1702 return err;
1703 }
1704 }
1705 }
1706 if (search_start == 0)
1707 search_start = last_block;
1708
1709 btrfs_tree_lock(cur);
1710 btrfs_set_lock_blocking(cur);
1711 err = __btrfs_cow_block(trans, root, cur, parent, i,
1712 &cur, search_start,
1713 min(16 * blocksize,
1714 (end_slot - i) * blocksize));
1715 if (err) {
1716 btrfs_tree_unlock(cur);
1717 free_extent_buffer(cur);
1718 break;
1719 }
1720 search_start = cur->start;
1721 last_block = cur->start;
1722 *last_ret = search_start;
1723 btrfs_tree_unlock(cur);
1724 free_extent_buffer(cur);
1725 }
1726 return err;
1727 }
1728
1729 /*
1730 * The leaf data grows from end-to-front in the node.
1731 * this returns the offset of the start of the last item's data,
1732 * which is the end of the leaf data stack
1733 */
1734 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1735 struct extent_buffer *leaf)
1736 {
1737 u32 nr = btrfs_header_nritems(leaf);
1738 if (nr == 0)
1739 return BTRFS_LEAF_DATA_SIZE(root);
1740 return btrfs_item_offset_nr(leaf, nr - 1);
1741 }
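/*
 * Rough leaf layout (illustrative only): item headers grow from the front,
 * item data grows from the back, free space sits in the middle:
 *
 *	[ header | item 0 | item 1 | ... | item N-1 | free space | data N-1 | ... | data 1 | data 0 ]
 *
 * leaf_data_end() returns the offset of data N-1, i.e. where the free
 * space ends and the data stack begins.
 */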
1742
1743
1744 /*
1745 * search for key in the extent_buffer. The items start at offset p,
1746 * and they are item_size apart. There are 'max' items in p.
1747 *
1748 * the slot in the array is returned via slot, and it points to
1749 * the place where you would insert key if it is not found in
1750 * the array.
1751 *
1752 * slot may point to max if the key is bigger than all of the keys
1753 */
1754 static noinline int generic_bin_search(struct extent_buffer *eb,
1755 unsigned long p,
1756 int item_size, struct btrfs_key *key,
1757 int max, int *slot)
1758 {
1759 int low = 0;
1760 int high = max;
1761 int mid;
1762 int ret;
1763 struct btrfs_disk_key *tmp = NULL;
1764 struct btrfs_disk_key unaligned;
1765 unsigned long offset;
1766 char *kaddr = NULL;
1767 unsigned long map_start = 0;
1768 unsigned long map_len = 0;
1769 int err;
1770
1771 while (low < high) {
1772 mid = (low + high) / 2;
1773 offset = p + mid * item_size;
1774
1775 if (!kaddr || offset < map_start ||
1776 (offset + sizeof(struct btrfs_disk_key)) >
1777 map_start + map_len) {
1778
1779 err = map_private_extent_buffer(eb, offset,
1780 sizeof(struct btrfs_disk_key),
1781 &kaddr, &map_start, &map_len);
1782
1783 if (!err) {
1784 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1785 map_start);
1786 } else {
1787 read_extent_buffer(eb, &unaligned,
1788 offset, sizeof(unaligned));
1789 tmp = &unaligned;
1790 }
1791
1792 } else {
1793 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1794 map_start);
1795 }
1796 ret = comp_keys(tmp, key);
1797
1798 if (ret < 0)
1799 low = mid + 1;
1800 else if (ret > 0)
1801 high = mid;
1802 else {
1803 *slot = mid;
1804 return 0;
1805 }
1806 }
1807 *slot = low;
1808 return 1;
1809 }
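/*
 * Worked example (not part of this file): with three keys at objectids
 * 1, 5 and 9, searching for objectid 7 narrows low/high to 2, returns 1
 * (not found) and sets *slot = 2, the place where 7 would be inserted.
 * Searching for objectid 5 returns 0 with *slot = 1.
 */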
1810
1811 /*
1812 * simple bin_search frontend that does the right thing for
1813 * leaves vs nodes
1814 */
1815 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1816 int level, int *slot)
1817 {
1818 if (level == 0)
1819 return generic_bin_search(eb,
1820 offsetof(struct btrfs_leaf, items),
1821 sizeof(struct btrfs_item),
1822 key, btrfs_header_nritems(eb),
1823 slot);
1824 else
1825 return generic_bin_search(eb,
1826 offsetof(struct btrfs_node, ptrs),
1827 sizeof(struct btrfs_key_ptr),
1828 key, btrfs_header_nritems(eb),
1829 slot);
1830 }
1831
1832 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1833 int level, int *slot)
1834 {
1835 return bin_search(eb, key, level, slot);
1836 }
1837
1838 static void root_add_used(struct btrfs_root *root, u32 size)
1839 {
1840 spin_lock(&root->accounting_lock);
1841 btrfs_set_root_used(&root->root_item,
1842 btrfs_root_used(&root->root_item) + size);
1843 spin_unlock(&root->accounting_lock);
1844 }
1845
1846 static void root_sub_used(struct btrfs_root *root, u32 size)
1847 {
1848 spin_lock(&root->accounting_lock);
1849 btrfs_set_root_used(&root->root_item,
1850 btrfs_root_used(&root->root_item) - size);
1851 spin_unlock(&root->accounting_lock);
1852 }
1853
1854 /* given a node and slot number, this reads the block it points to. The
1855 * extent buffer is returned with a reference taken (but unlocked).
1856 * NULL is returned on error.
1857 */
1858 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1859 struct extent_buffer *parent, int slot)
1860 {
1861 int level = btrfs_header_level(parent);
1862 struct extent_buffer *eb;
1863
1864 if (slot < 0)
1865 return NULL;
1866 if (slot >= btrfs_header_nritems(parent))
1867 return NULL;
1868
1869 BUG_ON(level == 0);
1870
1871 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1872 btrfs_node_ptr_generation(parent, slot));
1873 if (eb && !extent_buffer_uptodate(eb)) {
1874 free_extent_buffer(eb);
1875 eb = NULL;
1876 }
1877
1878 return eb;
1879 }
1880
1881 /*
1882 * node level balancing, used to make sure nodes are in proper order for
1883 * item deletion. We balance from the top down, so we have to make sure
1884 * that a deletion won't leave a node completely empty later on.
1885 */
1886 static noinline int balance_level(struct btrfs_trans_handle *trans,
1887 struct btrfs_root *root,
1888 struct btrfs_path *path, int level)
1889 {
1890 struct extent_buffer *right = NULL;
1891 struct extent_buffer *mid;
1892 struct extent_buffer *left = NULL;
1893 struct extent_buffer *parent = NULL;
1894 int ret = 0;
1895 int wret;
1896 int pslot;
1897 int orig_slot = path->slots[level];
1898 u64 orig_ptr;
1899
1900 if (level == 0)
1901 return 0;
1902
1903 mid = path->nodes[level];
1904
1905 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1906 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1907 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1908
1909 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1910
1911 if (level < BTRFS_MAX_LEVEL - 1) {
1912 parent = path->nodes[level + 1];
1913 pslot = path->slots[level + 1];
1914 }
1915
1916 /*
1917 * deal with the case where there is only one pointer in the root
1918 * by promoting the node below to a root
1919 */
1920 if (!parent) {
1921 struct extent_buffer *child;
1922
1923 if (btrfs_header_nritems(mid) != 1)
1924 return 0;
1925
1926 /* promote the child to a root */
1927 child = read_node_slot(root, mid, 0);
1928 if (!child) {
1929 ret = -EROFS;
1930 btrfs_std_error(root->fs_info, ret);
1931 goto enospc;
1932 }
1933
1934 btrfs_tree_lock(child);
1935 btrfs_set_lock_blocking(child);
1936 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1937 if (ret) {
1938 btrfs_tree_unlock(child);
1939 free_extent_buffer(child);
1940 goto enospc;
1941 }
1942
1943 tree_mod_log_set_root_pointer(root, child, 1);
1944 rcu_assign_pointer(root->node, child);
1945
1946 add_root_to_dirty_list(root);
1947 btrfs_tree_unlock(child);
1948
1949 path->locks[level] = 0;
1950 path->nodes[level] = NULL;
1951 clean_tree_block(trans, root, mid);
1952 btrfs_tree_unlock(mid);
1953 /* once for the path */
1954 free_extent_buffer(mid);
1955
1956 root_sub_used(root, mid->len);
1957 btrfs_free_tree_block(trans, root, mid, 0, 1);
1958 /* once for the root ptr */
1959 free_extent_buffer_stale(mid);
1960 return 0;
1961 }
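	/* mid still holds more than a quarter of the pointers, nothing to rebalance */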
1962 if (btrfs_header_nritems(mid) >
1963 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1964 return 0;
1965
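	/*
	 * mid is getting empty: read in (and COW) the left and right
	 * siblings so we can shift pointers between them and mid below
	 */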
1966 left = read_node_slot(root, parent, pslot - 1);
1967 if (left) {
1968 btrfs_tree_lock(left);
1969 btrfs_set_lock_blocking(left);
1970 wret = btrfs_cow_block(trans, root, left,
1971 parent, pslot - 1, &left);
1972 if (wret) {
1973 ret = wret;
1974 goto enospc;
1975 }
1976 }
1977 right = read_node_slot(root, parent, pslot + 1);
1978 if (right) {
1979 btrfs_tree_lock(right);
1980 btrfs_set_lock_blocking(right);
1981 wret = btrfs_cow_block(trans, root, right,
1982 parent, pslot + 1, &right);
1983 if (wret) {
1984 ret = wret;
1985 goto enospc;
1986 }
1987 }
1988
1989 /* first, try to make some room in the middle buffer */
1990 if (left) {
1991 orig_slot += btrfs_header_nritems(left);
1992 wret = push_node_left(trans, root, left, mid, 1);
1993 if (wret < 0)
1994 ret = wret;
1995 }
1996
1997 /*
1998 * then try to empty the right most buffer into the middle
1999 */
2000 if (right) {
2001 wret = push_node_left(trans, root, mid, right, 1);
2002 if (wret < 0 && wret != -ENOSPC)
2003 ret = wret;
2004 if (btrfs_header_nritems(right) == 0) {
2005 clean_tree_block(trans, root, right);
2006 btrfs_tree_unlock(right);
2007 del_ptr(root, path, level + 1, pslot + 1);
2008 root_sub_used(root, right->len);
2009 btrfs_free_tree_block(trans, root, right, 0, 1);
2010 free_extent_buffer_stale(right);
2011 right = NULL;
2012 } else {
2013 struct btrfs_disk_key right_key;
2014 btrfs_node_key(right, &right_key, 0);
2015 tree_mod_log_set_node_key(root->fs_info, parent,
2016 pslot + 1, 0);
2017 btrfs_set_node_key(parent, &right_key, pslot + 1);
2018 btrfs_mark_buffer_dirty(parent);
2019 }
2020 }
2021 if (btrfs_header_nritems(mid) == 1) {
2022 /*
2023 * we're not allowed to leave a node with one item in the
2024 * tree during a delete. A deletion from lower in the tree
2025 * could try to delete the only pointer in this node.
2026 * So, pull some keys from the left.
2027 * There has to be a left pointer at this point because
2028 * otherwise we would have pulled some pointers from the
2029 * right
2030 */
2031 if (!left) {
2032 ret = -EROFS;
2033 btrfs_std_error(root->fs_info, ret);
2034 goto enospc;
2035 }
2036 wret = balance_node_right(trans, root, mid, left);
2037 if (wret < 0) {
2038 ret = wret;
2039 goto enospc;
2040 }
2041 if (wret == 1) {
2042 wret = push_node_left(trans, root, left, mid, 1);
2043 if (wret < 0)
2044 ret = wret;
2045 }
2046 BUG_ON(wret == 1);
2047 }
2048 if (btrfs_header_nritems(mid) == 0) {
2049 clean_tree_block(trans, root, mid);
2050 btrfs_tree_unlock(mid);
2051 del_ptr(root, path, level + 1, pslot);
2052 root_sub_used(root, mid->len);
2053 btrfs_free_tree_block(trans, root, mid, 0, 1);
2054 free_extent_buffer_stale(mid);
2055 mid = NULL;
2056 } else {
2057 /* update the parent key to reflect our changes */
2058 struct btrfs_disk_key mid_key;
2059 btrfs_node_key(mid, &mid_key, 0);
2060 tree_mod_log_set_node_key(root->fs_info, parent,
2061 pslot, 0);
2062 btrfs_set_node_key(parent, &mid_key, pslot);
2063 btrfs_mark_buffer_dirty(parent);
2064 }
2065
2066 /* update the path */
2067 if (left) {
2068 if (btrfs_header_nritems(left) > orig_slot) {
2069 extent_buffer_get(left);
2070 /* left was locked after cow */
2071 path->nodes[level] = left;
2072 path->slots[level + 1] -= 1;
2073 path->slots[level] = orig_slot;
2074 if (mid) {
2075 btrfs_tree_unlock(mid);
2076 free_extent_buffer(mid);
2077 }
2078 } else {
2079 orig_slot -= btrfs_header_nritems(left);
2080 path->slots[level] = orig_slot;
2081 }
2082 }
2083 /* double check we haven't messed things up */
2084 if (orig_ptr !=
2085 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2086 BUG();
2087 enospc:
2088 if (right) {
2089 btrfs_tree_unlock(right);
2090 free_extent_buffer(right);
2091 }
2092 if (left) {
2093 if (path->nodes[level] != left)
2094 btrfs_tree_unlock(left);
2095 free_extent_buffer(left);
2096 }
2097 return ret;
2098 }
2099
2100 /* Node balancing for insertion. Here we only split or push nodes around
2101 * when they are completely full. This is also done top down, so we
2102 * have to be pessimistic.
2103 */
2104 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2105 struct btrfs_root *root,
2106 struct btrfs_path *path, int level)
2107 {
2108 struct extent_buffer *right = NULL;
2109 struct extent_buffer *mid;
2110 struct extent_buffer *left = NULL;
2111 struct extent_buffer *parent = NULL;
2112 int ret = 0;
2113 int wret;
2114 int pslot;
2115 int orig_slot = path->slots[level];
2116
2117 if (level == 0)
2118 return 1;
2119
2120 mid = path->nodes[level];
2121 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2122
2123 if (level < BTRFS_MAX_LEVEL - 1) {
2124 parent = path->nodes[level + 1];
2125 pslot = path->slots[level + 1];
2126 }
2127
2128 if (!parent)
2129 return 1;
2130
2131 left = read_node_slot(root, parent, pslot - 1);
2132
2133 /* first, try to make some room in the middle buffer */
2134 if (left) {
2135 u32 left_nr;
2136
2137 btrfs_tree_lock(left);
2138 btrfs_set_lock_blocking(left);
2139
2140 left_nr = btrfs_header_nritems(left);
2141 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2142 wret = 1;
2143 } else {
2144 ret = btrfs_cow_block(trans, root, left, parent,
2145 pslot - 1, &left);
2146 if (ret)
2147 wret = 1;
2148 else {
2149 wret = push_node_left(trans, root,
2150 left, mid, 0);
2151 }
2152 }
2153 if (wret < 0)
2154 ret = wret;
2155 if (wret == 0) {
2156 struct btrfs_disk_key disk_key;
2157 orig_slot += left_nr;
2158 btrfs_node_key(mid, &disk_key, 0);
2159 tree_mod_log_set_node_key(root->fs_info, parent,
2160 pslot, 0);
2161 btrfs_set_node_key(parent, &disk_key, pslot);
2162 btrfs_mark_buffer_dirty(parent);
2163 if (btrfs_header_nritems(left) > orig_slot) {
2164 path->nodes[level] = left;
2165 path->slots[level + 1] -= 1;
2166 path->slots[level] = orig_slot;
2167 btrfs_tree_unlock(mid);
2168 free_extent_buffer(mid);
2169 } else {
2170 orig_slot -=
2171 btrfs_header_nritems(left);
2172 path->slots[level] = orig_slot;
2173 btrfs_tree_unlock(left);
2174 free_extent_buffer(left);
2175 }
2176 return 0;
2177 }
2178 btrfs_tree_unlock(left);
2179 free_extent_buffer(left);
2180 }
2181 right = read_node_slot(root, parent, pslot + 1);
2182
2183 /*
2184 * then try to empty the right most buffer into the middle
2185 */
2186 if (right) {
2187 u32 right_nr;
2188
2189 btrfs_tree_lock(right);
2190 btrfs_set_lock_blocking(right);
2191
2192 right_nr = btrfs_header_nritems(right);
2193 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2194 wret = 1;
2195 } else {
2196 ret = btrfs_cow_block(trans, root, right,
2197 parent, pslot + 1,
2198 &right);
2199 if (ret)
2200 wret = 1;
2201 else {
2202 wret = balance_node_right(trans, root,
2203 right, mid);
2204 }
2205 }
2206 if (wret < 0)
2207 ret = wret;
2208 if (wret == 0) {
2209 struct btrfs_disk_key disk_key;
2210
2211 btrfs_node_key(right, &disk_key, 0);
2212 tree_mod_log_set_node_key(root->fs_info, parent,
2213 pslot + 1, 0);
2214 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2215 btrfs_mark_buffer_dirty(parent);
2216
2217 if (btrfs_header_nritems(mid) <= orig_slot) {
2218 path->nodes[level] = right;
2219 path->slots[level + 1] += 1;
2220 path->slots[level] = orig_slot -
2221 btrfs_header_nritems(mid);
2222 btrfs_tree_unlock(mid);
2223 free_extent_buffer(mid);
2224 } else {
2225 btrfs_tree_unlock(right);
2226 free_extent_buffer(right);
2227 }
2228 return 0;
2229 }
2230 btrfs_tree_unlock(right);
2231 free_extent_buffer(right);
2232 }
2233 return 1;
2234 }
2235
2236 /*
2237 * readahead one full node of leaves, finding things that are close
2238 * to the block in 'slot', and triggering readahead on them.
2239 */
2240 static void reada_for_search(struct btrfs_root *root,
2241 struct btrfs_path *path,
2242 int level, int slot, u64 objectid)
2243 {
2244 struct extent_buffer *node;
2245 struct btrfs_disk_key disk_key;
2246 u32 nritems;
2247 u64 search;
2248 u64 target;
2249 u64 nread = 0;
2250 u64 gen;
2251 int direction = path->reada;
2252 struct extent_buffer *eb;
2253 u32 nr;
2254 u32 blocksize;
2255 u32 nscan = 0;
2256
2257 if (level != 1)
2258 return;
2259
2260 if (!path->nodes[level])
2261 return;
2262
2263 node = path->nodes[level];
2264
2265 search = btrfs_node_blockptr(node, slot);
2266 blocksize = root->nodesize;
2267 eb = btrfs_find_tree_block(root, search);
2268 if (eb) {
2269 free_extent_buffer(eb);
2270 return;
2271 }
2272
2273 target = search;
2274
2275 nritems = btrfs_header_nritems(node);
2276 nr = slot;
2277
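	/*
	 * walk outward from 'slot' in the readahead direction, starting
	 * readahead on blocks whose disk location is within 64K of the
	 * target block, and stop once we have queued 64K of readahead or
	 * scanned 32 slots
	 */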
2278 while (1) {
2279 if (direction < 0) {
2280 if (nr == 0)
2281 break;
2282 nr--;
2283 } else if (direction > 0) {
2284 nr++;
2285 if (nr >= nritems)
2286 break;
2287 }
2288 if (path->reada < 0 && objectid) {
2289 btrfs_node_key(node, &disk_key, nr);
2290 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2291 break;
2292 }
2293 search = btrfs_node_blockptr(node, nr);
2294 if ((search <= target && target - search <= 65536) ||
2295 (search > target && search - target <= 65536)) {
2296 gen = btrfs_node_ptr_generation(node, nr);
2297 readahead_tree_block(root, search, blocksize);
2298 nread += blocksize;
2299 }
2300 nscan++;
2301 if ((nread > 65536 || nscan > 32))
2302 break;
2303 }
2304 }
2305
2306 static noinline void reada_for_balance(struct btrfs_root *root,
2307 struct btrfs_path *path, int level)
2308 {
2309 int slot;
2310 int nritems;
2311 struct extent_buffer *parent;
2312 struct extent_buffer *eb;
2313 u64 gen;
2314 u64 block1 = 0;
2315 u64 block2 = 0;
2316 int blocksize;
2317
2318 parent = path->nodes[level + 1];
2319 if (!parent)
2320 return;
2321
2322 nritems = btrfs_header_nritems(parent);
2323 slot = path->slots[level + 1];
2324 blocksize = root->nodesize;
2325
2326 if (slot > 0) {
2327 block1 = btrfs_node_blockptr(parent, slot - 1);
2328 gen = btrfs_node_ptr_generation(parent, slot - 1);
2329 eb = btrfs_find_tree_block(root, block1);
2330 /*
2331 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2332 * don't want to return EAGAIN here. That would loop
2333 * forever
2334 */
2335 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2336 block1 = 0;
2337 free_extent_buffer(eb);
2338 }
2339 if (slot + 1 < nritems) {
2340 block2 = btrfs_node_blockptr(parent, slot + 1);
2341 gen = btrfs_node_ptr_generation(parent, slot + 1);
2342 eb = btrfs_find_tree_block(root, block2);
2343 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2344 block2 = 0;
2345 free_extent_buffer(eb);
2346 }
2347
2348 if (block1)
2349 readahead_tree_block(root, block1, blocksize);
2350 if (block2)
2351 readahead_tree_block(root, block2, blocksize);
2352 }
2353
2354
2355 /*
2356 * when we walk down the tree, it is usually safe to unlock the higher layers
2357 * in the tree. The exceptions are when our path goes through slot 0, because
2358 * operations on the tree might require changing key pointers higher up in the
2359 * tree.
2360 *
2361 * callers might also have set path->keep_locks, which tells this code to keep
2362 * the lock if the path points to the last slot in the block. This is part of
2363 * walking through the tree, and selecting the next slot in the higher block.
2364 *
2365 * lowest_unlock sets the lowest level in the tree we're allowed to unlock, so
2366 * if lowest_unlock is 1, level 0 won't be unlocked
2367 */
2368 static noinline void unlock_up(struct btrfs_path *path, int level,
2369 int lowest_unlock, int min_write_lock_level,
2370 int *write_lock_level)
2371 {
2372 int i;
2373 int skip_level = level;
2374 int no_skips = 0;
2375 struct extent_buffer *t;
2376
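	/*
	 * Levels at or below skip_level stay locked.  skip_level is pushed
	 * past any level whose slot is 0 (a key change there can propagate
	 * to the parent) and, when keep_locks is set, past any level
	 * positioned at its last slot (the caller may still need the parent
	 * to advance to the next slot).  Once we reach the first level that
	 * can safely be unlocked, no_skips is set so higher levels are no
	 * longer skipped.
	 */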
2377 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2378 if (!path->nodes[i])
2379 break;
2380 if (!path->locks[i])
2381 break;
2382 if (!no_skips && path->slots[i] == 0) {
2383 skip_level = i + 1;
2384 continue;
2385 }
2386 if (!no_skips && path->keep_locks) {
2387 u32 nritems;
2388 t = path->nodes[i];
2389 nritems = btrfs_header_nritems(t);
2390 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2391 skip_level = i + 1;
2392 continue;
2393 }
2394 }
2395 if (skip_level < i && i >= lowest_unlock)
2396 no_skips = 1;
2397
2398 t = path->nodes[i];
2399 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2400 btrfs_tree_unlock_rw(t, path->locks[i]);
2401 path->locks[i] = 0;
2402 if (write_lock_level &&
2403 i > min_write_lock_level &&
2404 i <= *write_lock_level) {
2405 *write_lock_level = i - 1;
2406 }
2407 }
2408 }
2409 }
2410
2411 /*
2412 * This releases any locks held in the path starting at level and
2413 * going all the way up to the root.
2414 *
2415 * btrfs_search_slot will keep the lock held on higher nodes in a few
2416 * corner cases, such as COW of the block at slot zero in the node. This
2417 * ignores those rules, and it should only be called when there are no
2418 * more updates to be done higher up in the tree.
2419 */
2420 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2421 {
2422 int i;
2423
2424 if (path->keep_locks)
2425 return;
2426
2427 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2428 if (!path->nodes[i])
2429 continue;
2430 if (!path->locks[i])
2431 continue;
2432 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2433 path->locks[i] = 0;
2434 }
2435 }
2436
2437 /*
2438 * helper function for btrfs_search_slot. The goal is to find a block
2439 * in cache without setting the path to blocking. If we find the block
2440 * we return zero and the path is unchanged.
2441 *
2442 * If we can't find the block, we set the path blocking and do some
2443 * readahead. -EAGAIN is returned and the search must be repeated.
2444 */
2445 static int
2446 read_block_for_search(struct btrfs_trans_handle *trans,
2447 struct btrfs_root *root, struct btrfs_path *p,
2448 struct extent_buffer **eb_ret, int level, int slot,
2449 struct btrfs_key *key, u64 time_seq)
2450 {
2451 u64 blocknr;
2452 u64 gen;
2453 struct extent_buffer *b = *eb_ret;
2454 struct extent_buffer *tmp;
2455 int ret;
2456
2457 blocknr = btrfs_node_blockptr(b, slot);
2458 gen = btrfs_node_ptr_generation(b, slot);
2459
2460 tmp = btrfs_find_tree_block(root, blocknr);
2461 if (tmp) {
2462 /* first we do an atomic uptodate check */
2463 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2464 *eb_ret = tmp;
2465 return 0;
2466 }
2467
2468 /* the pages were up to date, but we failed
2469 * the generation number check. Do a full
2470 * read for the generation number that is correct.
2471 * We must do this without dropping locks so
2472 * we can trust our generation number
2473 */
2474 btrfs_set_path_blocking(p);
2475
2476 /* now we're allowed to do a blocking uptodate check */
2477 ret = btrfs_read_buffer(tmp, gen);
2478 if (!ret) {
2479 *eb_ret = tmp;
2480 return 0;
2481 }
2482 free_extent_buffer(tmp);
2483 btrfs_release_path(p);
2484 return -EIO;
2485 }
2486
2487 /*
2488 * reduce lock contention at high levels
2489 * of the btree by dropping locks before
2490 * we read. Don't release the lock on the current
2491 * level because we need to walk this node to figure
2492 * out which blocks to read.
2493 */
2494 btrfs_unlock_up_safe(p, level + 1);
2495 btrfs_set_path_blocking(p);
2496
2497 free_extent_buffer(tmp);
2498 if (p->reada)
2499 reada_for_search(root, p, level, slot, key->objectid);
2500
2501 btrfs_release_path(p);
2502
2503 ret = -EAGAIN;
2504 tmp = read_tree_block(root, blocknr, 0);
2505 if (tmp) {
2506 /*
2507 * If the read above didn't mark this buffer up to date,
2508 * it will never end up being up to date. Set ret to -EIO now
2509 * and give up so that our caller doesn't loop forever
2510 * on our EAGAINs.
2511 */
2512 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2513 ret = -EIO;
2514 free_extent_buffer(tmp);
2515 }
2516 return ret;
2517 }
2518
2519 /*
2520 * helper function for btrfs_search_slot. This does all of the checks
2521 * for node-level blocks and does any balancing required based on
2522 * the ins_len.
2523 *
2524 * If no extra work was required, zero is returned. If we had to
2525 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2526 * start over
2527 */
2528 static int
2529 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2530 struct btrfs_root *root, struct btrfs_path *p,
2531 struct extent_buffer *b, int level, int ins_len,
2532 int *write_lock_level)
2533 {
2534 int ret;
2535 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2536 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2537 int sret;
2538
2539 if (*write_lock_level < level + 1) {
2540 *write_lock_level = level + 1;
2541 btrfs_release_path(p);
2542 goto again;
2543 }
2544
2545 btrfs_set_path_blocking(p);
2546 reada_for_balance(root, p, level);
2547 sret = split_node(trans, root, p, level);
2548 btrfs_clear_path_blocking(p, NULL, 0);
2549
2550 BUG_ON(sret > 0);
2551 if (sret) {
2552 ret = sret;
2553 goto done;
2554 }
2555 b = p->nodes[level];
2556 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2557 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2558 int sret;
2559
2560 if (*write_lock_level < level + 1) {
2561 *write_lock_level = level + 1;
2562 btrfs_release_path(p);
2563 goto again;
2564 }
2565
2566 btrfs_set_path_blocking(p);
2567 reada_for_balance(root, p, level);
2568 sret = balance_level(trans, root, p, level);
2569 btrfs_clear_path_blocking(p, NULL, 0);
2570
2571 if (sret) {
2572 ret = sret;
2573 goto done;
2574 }
2575 b = p->nodes[level];
2576 if (!b) {
2577 btrfs_release_path(p);
2578 goto again;
2579 }
2580 BUG_ON(btrfs_header_nritems(b) == 1);
2581 }
2582 return 0;
2583
2584 again:
2585 ret = -EAGAIN;
2586 done:
2587 return ret;
2588 }
2589
2590 static void key_search_validate(struct extent_buffer *b,
2591 struct btrfs_key *key,
2592 int level)
2593 {
2594 #ifdef CONFIG_BTRFS_ASSERT
2595 struct btrfs_disk_key disk_key;
2596
2597 btrfs_cpu_key_to_disk(&disk_key, key);
2598
2599 if (level == 0)
2600 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2601 offsetof(struct btrfs_leaf, items[0].key),
2602 sizeof(disk_key)));
2603 else
2604 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2605 offsetof(struct btrfs_node, ptrs[0].key),
2606 sizeof(disk_key)));
2607 #endif
2608 }
2609
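/*
 * Once bin_search() has found an exact match at some level, the same key
 * must sit in slot 0 of every node further down the path, so key_search()
 * can skip the binary search for the remaining levels.  prev_cmp carries
 * the last comparison result between calls; key_search_validate() double
 * checks the shortcut when CONFIG_BTRFS_ASSERT is enabled.
 */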
2610 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2611 int level, int *prev_cmp, int *slot)
2612 {
2613 if (*prev_cmp != 0) {
2614 *prev_cmp = bin_search(b, key, level, slot);
2615 return *prev_cmp;
2616 }
2617
2618 key_search_validate(b, key, level);
2619 *slot = 0;
2620
2621 return 0;
2622 }
2623
2624 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
2625 u64 iobjectid, u64 ioff, u8 key_type,
2626 struct btrfs_key *found_key)
2627 {
2628 int ret;
2629 struct btrfs_key key;
2630 struct extent_buffer *eb;
2631 struct btrfs_path *path;
2632
2633 key.type = key_type;
2634 key.objectid = iobjectid;
2635 key.offset = ioff;
2636
2637 if (found_path == NULL) {
2638 path = btrfs_alloc_path();
2639 if (!path)
2640 return -ENOMEM;
2641 } else
2642 path = found_path;
2643
2644 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2645 if ((ret < 0) || (found_key == NULL)) {
2646 if (path != found_path)
2647 btrfs_free_path(path);
2648 return ret;
2649 }
2650
2651 eb = path->nodes[0];
2652 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2653 ret = btrfs_next_leaf(fs_root, path);
2654 if (ret)
2655 return ret;
2656 eb = path->nodes[0];
2657 }
2658
2659 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2660 if (found_key->type != key.type ||
2661 found_key->objectid != key.objectid)
2662 return 1;
2663
2664 return 0;
2665 }
2666
2667 /*
2668 * look for key in the tree. path is filled in with nodes along the way.
2669 * If key is found, we return zero and you can find the item in the leaf
2670 * level of the path (level 0).
2671 *
2672 * If the key isn't found, the path points to the slot where it should
2673 * be inserted, and 1 is returned. If there are other errors during the
2674 * search a negative error number is returned.
2675 *
2676 * if ins_len > 0, nodes and leaves will be split as we walk down the
2677 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2678 * possible)
2679 */
2680 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2681 *root, struct btrfs_key *key, struct btrfs_path *p, int
2682 ins_len, int cow)
2683 {
2684 struct extent_buffer *b;
2685 int slot;
2686 int ret;
2687 int err;
2688 int level;
2689 int lowest_unlock = 1;
2690 int root_lock;
2691 /* everything at write_lock_level or lower must be write locked */
2692 int write_lock_level = 0;
2693 u8 lowest_level = 0;
2694 int min_write_lock_level;
2695 int prev_cmp;
2696
2697 lowest_level = p->lowest_level;
2698 WARN_ON(lowest_level && ins_len > 0);
2699 WARN_ON(p->nodes[0] != NULL);
2700 BUG_ON(!cow && ins_len);
2701
2702 if (ins_len < 0) {
2703 lowest_unlock = 2;
2704
2705 /* when we are removing items, we might have to go up to level
2706 * two as we update tree pointers. Make sure we keep write
2707 * locks on those levels as well
2708 */
2709 write_lock_level = 2;
2710 } else if (ins_len > 0) {
2711 /*
2712 * for inserting items, make sure we have a write lock on
2713 * level 1 so we can update keys
2714 */
2715 write_lock_level = 1;
2716 }
2717
2718 if (!cow)
2719 write_lock_level = -1;
2720
2721 if (cow && (p->keep_locks || p->lowest_level))
2722 write_lock_level = BTRFS_MAX_LEVEL;
2723
2724 min_write_lock_level = write_lock_level;
2725
2726 again:
2727 prev_cmp = -1;
2728 /*
2729 * we try very hard to do read locks on the root
2730 */
2731 root_lock = BTRFS_READ_LOCK;
2732 level = 0;
2733 if (p->search_commit_root) {
2734 /*
2735 * the commit roots are read only
2736 * so we always do read locks
2737 */
2738 if (p->need_commit_sem)
2739 down_read(&root->fs_info->commit_root_sem);
2740 b = root->commit_root;
2741 extent_buffer_get(b);
2742 level = btrfs_header_level(b);
2743 if (p->need_commit_sem)
2744 up_read(&root->fs_info->commit_root_sem);
2745 if (!p->skip_locking)
2746 btrfs_tree_read_lock(b);
2747 } else {
2748 if (p->skip_locking) {
2749 b = btrfs_root_node(root);
2750 level = btrfs_header_level(b);
2751 } else {
2752 /* we don't know the level of the root node
2753 * until we actually have it read locked
2754 */
2755 b = btrfs_read_lock_root_node(root);
2756 level = btrfs_header_level(b);
2757 if (level <= write_lock_level) {
2758 /* whoops, must trade for write lock */
2759 btrfs_tree_read_unlock(b);
2760 free_extent_buffer(b);
2761 b = btrfs_lock_root_node(root);
2762 root_lock = BTRFS_WRITE_LOCK;
2763
2764 /* the level might have changed, check again */
2765 level = btrfs_header_level(b);
2766 }
2767 }
2768 }
2769 p->nodes[level] = b;
2770 if (!p->skip_locking)
2771 p->locks[level] = root_lock;
2772
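	/*
	 * walk down the tree one level per iteration, COWing blocks when cow
	 * is set and splitting or merging nodes as dictated by ins_len,
	 * until we reach p->lowest_level (level 0 unless the caller asked
	 * otherwise)
	 */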
2773 while (b) {
2774 level = btrfs_header_level(b);
2775
2776 /*
2777 * setup the path here so we can release it under lock
2778 * contention with the cow code
2779 */
2780 if (cow) {
2781 /*
2782 * if we don't really need to cow this block
2783 * then we don't want to set the path blocking,
2784 * so we test it here
2785 */
2786 if (!should_cow_block(trans, root, b))
2787 goto cow_done;
2788
2789 /*
2790 * must have write locks on this node and the
2791 * parent
2792 */
2793 if (level > write_lock_level ||
2794 (level + 1 > write_lock_level &&
2795 level + 1 < BTRFS_MAX_LEVEL &&
2796 p->nodes[level + 1])) {
2797 write_lock_level = level + 1;
2798 btrfs_release_path(p);
2799 goto again;
2800 }
2801
2802 btrfs_set_path_blocking(p);
2803 err = btrfs_cow_block(trans, root, b,
2804 p->nodes[level + 1],
2805 p->slots[level + 1], &b);
2806 if (err) {
2807 ret = err;
2808 goto done;
2809 }
2810 }
2811 cow_done:
2812 p->nodes[level] = b;
2813 btrfs_clear_path_blocking(p, NULL, 0);
2814
2815 /*
2816 * we have a lock on b and as long as we aren't changing
2817 * the tree, there is no way for the items in b to change.
2818 * It is safe to drop the lock on our parent before we
2819 * go through the expensive btree search on b.
2820 *
2821 * If we're inserting or deleting (ins_len != 0), then we might
2822 * be changing slot zero, which may require changing the parent.
2823 * So, we can't drop the lock until after we know which slot
2824 * we're operating on.
2825 */
2826 if (!ins_len && !p->keep_locks) {
2827 int u = level + 1;
2828
2829 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2830 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2831 p->locks[u] = 0;
2832 }
2833 }
2834
2835 ret = key_search(b, key, level, &prev_cmp, &slot);
2836
2837 if (level != 0) {
2838 int dec = 0;
2839 if (ret && slot > 0) {
2840 dec = 1;
2841 slot -= 1;
2842 }
2843 p->slots[level] = slot;
2844 err = setup_nodes_for_search(trans, root, p, b, level,
2845 ins_len, &write_lock_level);
2846 if (err == -EAGAIN)
2847 goto again;
2848 if (err) {
2849 ret = err;
2850 goto done;
2851 }
2852 b = p->nodes[level];
2853 slot = p->slots[level];
2854
2855 /*
2856 * slot 0 is special, if we change the key
2857 * we have to update the parent pointer
2858 * which means we must have a write lock
2859 * on the parent
2860 */
2861 if (slot == 0 && ins_len &&
2862 write_lock_level < level + 1) {
2863 write_lock_level = level + 1;
2864 btrfs_release_path(p);
2865 goto again;
2866 }
2867
2868 unlock_up(p, level, lowest_unlock,
2869 min_write_lock_level, &write_lock_level);
2870
2871 if (level == lowest_level) {
2872 if (dec)
2873 p->slots[level]++;
2874 goto done;
2875 }
2876
2877 err = read_block_for_search(trans, root, p,
2878 &b, level, slot, key, 0);
2879 if (err == -EAGAIN)
2880 goto again;
2881 if (err) {
2882 ret = err;
2883 goto done;
2884 }
2885
2886 if (!p->skip_locking) {
2887 level = btrfs_header_level(b);
2888 if (level <= write_lock_level) {
2889 err = btrfs_try_tree_write_lock(b);
2890 if (!err) {
2891 btrfs_set_path_blocking(p);
2892 btrfs_tree_lock(b);
2893 btrfs_clear_path_blocking(p, b,
2894 BTRFS_WRITE_LOCK);
2895 }
2896 p->locks[level] = BTRFS_WRITE_LOCK;
2897 } else {
2898 err = btrfs_try_tree_read_lock(b);
2899 if (!err) {
2900 btrfs_set_path_blocking(p);
2901 btrfs_tree_read_lock(b);
2902 btrfs_clear_path_blocking(p, b,
2903 BTRFS_READ_LOCK);
2904 }
2905 p->locks[level] = BTRFS_READ_LOCK;
2906 }
2907 p->nodes[level] = b;
2908 }
2909 } else {
2910 p->slots[level] = slot;
2911 if (ins_len > 0 &&
2912 btrfs_leaf_free_space(root, b) < ins_len) {
2913 if (write_lock_level < 1) {
2914 write_lock_level = 1;
2915 btrfs_release_path(p);
2916 goto again;
2917 }
2918
2919 btrfs_set_path_blocking(p);
2920 err = split_leaf(trans, root, key,
2921 p, ins_len, ret == 0);
2922 btrfs_clear_path_blocking(p, NULL, 0);
2923
2924 BUG_ON(err > 0);
2925 if (err) {
2926 ret = err;
2927 goto done;
2928 }
2929 }
2930 if (!p->search_for_split)
2931 unlock_up(p, level, lowest_unlock,
2932 min_write_lock_level, &write_lock_level);
2933 goto done;
2934 }
2935 }
2936 ret = 1;
2937 done:
2938 /*
2939 * we don't really know what they plan on doing with the path
2940 * from here on, so for now just mark it as blocking
2941 */
2942 if (!p->leave_spinning)
2943 btrfs_set_path_blocking(p);
2944 if (ret < 0)
2945 btrfs_release_path(p);
2946 return ret;
2947 }
2948
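/*
 * Illustrative sketch (not from the original file): the common read-only
 * lookup pattern built on btrfs_search_slot().  The helper name, the inode
 * number argument and the choice of BTRFS_INODE_ITEM_KEY are only examples;
 * the block is kept under "#if 0" so it has no effect on the build.  With
 * trans == NULL, ins_len == 0 and cow == 0 the search takes read locks and
 * returns 0 on an exact match, 1 if the key is missing (the path then
 * points at the insertion position), or a negative errno.
 */
#if 0
static int example_lookup_inode_item(struct btrfs_root *root, u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found;
	int ret;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;	/* example key type */
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0)
		/* path->nodes[0] / path->slots[0] now point at the item */
		btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);

	btrfs_free_path(path);
	return ret;
}
#endif
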
2949 /*
2950 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2951 * current state of the tree together with the operations recorded in the tree
2952 * modification log to search for the key in a previous version of this tree, as
2953 * denoted by the time_seq parameter.
2954 *
2955 * Naturally, there is no support for insert, delete or cow operations.
2956 *
2957 * The resulting path and return value will be set up as if we called
2958 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2959 */
2960 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2961 struct btrfs_path *p, u64 time_seq)
2962 {
2963 struct extent_buffer *b;
2964 int slot;
2965 int ret;
2966 int err;
2967 int level;
2968 int lowest_unlock = 1;
2969 u8 lowest_level = 0;
2970 int prev_cmp = -1;
2971
2972 lowest_level = p->lowest_level;
2973 WARN_ON(p->nodes[0] != NULL);
2974
2975 if (p->search_commit_root) {
2976 BUG_ON(time_seq);
2977 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2978 }
2979
2980 again:
2981 b = get_old_root(root, time_seq);
2982 level = btrfs_header_level(b);
2983 p->locks[level] = BTRFS_READ_LOCK;
2984
2985 while (b) {
2986 level = btrfs_header_level(b);
2987 p->nodes[level] = b;
2988 btrfs_clear_path_blocking(p, NULL, 0);
2989
2990 /*
2991 * we have a lock on b and as long as we aren't changing
2992 * the tree, there is no way for the items in b to change.
2993 * It is safe to drop the lock on our parent before we
2994 * go through the expensive btree search on b.
2995 */
2996 btrfs_unlock_up_safe(p, level + 1);
2997
2998 /*
2999 * Since we can unwind eb's we want to do a real search every
3000 * time.
3001 */
3002 prev_cmp = -1;
3003 ret = key_search(b, key, level, &prev_cmp, &slot);
3004
3005 if (level != 0) {
3006 int dec = 0;
3007 if (ret && slot > 0) {
3008 dec = 1;
3009 slot -= 1;
3010 }
3011 p->slots[level] = slot;
3012 unlock_up(p, level, lowest_unlock, 0, NULL);
3013
3014 if (level == lowest_level) {
3015 if (dec)
3016 p->slots[level]++;
3017 goto done;
3018 }
3019
3020 err = read_block_for_search(NULL, root, p, &b, level,
3021 slot, key, time_seq);
3022 if (err == -EAGAIN)
3023 goto again;
3024 if (err) {
3025 ret = err;
3026 goto done;
3027 }
3028
3029 level = btrfs_header_level(b);
3030 err = btrfs_try_tree_read_lock(b);
3031 if (!err) {
3032 btrfs_set_path_blocking(p);
3033 btrfs_tree_read_lock(b);
3034 btrfs_clear_path_blocking(p, b,
3035 BTRFS_READ_LOCK);
3036 }
3037 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3038 if (!b) {
3039 ret = -ENOMEM;
3040 goto done;
3041 }
3042 p->locks[level] = BTRFS_READ_LOCK;
3043 p->nodes[level] = b;
3044 } else {
3045 p->slots[level] = slot;
3046 unlock_up(p, level, lowest_unlock, 0, NULL);
3047 goto done;
3048 }
3049 }
3050 ret = 1;
3051 done:
3052 if (!p->leave_spinning)
3053 btrfs_set_path_blocking(p);
3054 if (ret < 0)
3055 btrfs_release_path(p);
3056
3057 return ret;
3058 }
3059
3060 /*
3061 * helper to use instead of search slot if no exact match is needed but
3062 * instead the next or previous item should be returned.
3063 * When find_higher is true, the next higher item is returned, the next lower
3064 * otherwise.
3065 * When return_any and find_higher are both true, and no higher item is found,
3066 * return the next lower instead.
3067 * When return_any is true and find_higher is false, and no lower item is found,
3068 * return the next higher instead.
3069 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3070 * < 0 on error
3071 */
3072 int btrfs_search_slot_for_read(struct btrfs_root *root,
3073 struct btrfs_key *key, struct btrfs_path *p,
3074 int find_higher, int return_any)
3075 {
3076 int ret;
3077 struct extent_buffer *leaf;
3078
3079 again:
3080 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3081 if (ret <= 0)
3082 return ret;
3083 /*
3084 * a return value of 1 means the path is at the position where the
3085 * item should be inserted. Normally this is the next bigger item,
3086 * but in case the previous item is the last in a leaf, path points
3087 * to the first free slot in the previous leaf, i.e. at an invalid
3088 * item.
3089 */
3090 leaf = p->nodes[0];
3091
3092 if (find_higher) {
3093 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3094 ret = btrfs_next_leaf(root, p);
3095 if (ret <= 0)
3096 return ret;
3097 if (!return_any)
3098 return 1;
3099 /*
3100 * no higher item found, return the next
3101 * lower instead
3102 */
3103 return_any = 0;
3104 find_higher = 0;
3105 btrfs_release_path(p);
3106 goto again;
3107 }
3108 } else {
3109 if (p->slots[0] == 0) {
3110 ret = btrfs_prev_leaf(root, p);
3111 if (ret < 0)
3112 return ret;
3113 if (!ret) {
3114 leaf = p->nodes[0];
3115 if (p->slots[0] == btrfs_header_nritems(leaf))
3116 p->slots[0]--;
3117 return 0;
3118 }
3119 if (!return_any)
3120 return 1;
3121 /*
3122 * no lower item found, return the next
3123 * higher instead
3124 */
3125 return_any = 0;
3126 find_higher = 1;
3127 btrfs_release_path(p);
3128 goto again;
3129 } else {
3130 --p->slots[0];
3131 }
3132 }
3133 return 0;
3134 }
3135
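/*
 * Illustrative sketch (not from the original file): using
 * btrfs_search_slot_for_read() to fetch the closest item at or below a
 * given key.  The helper name is hypothetical and the block is kept under
 * "#if 0" so it does not affect the build.
 */
#if 0
static int example_find_item_at_or_below(struct btrfs_root *root,
					  struct btrfs_key *search_key,
					  struct btrfs_key *found_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find_higher == 0: fall back to the next lower item when there is
	 * no exact match; return_any == 0: return 1 if nothing is found
	 */
	ret = btrfs_search_slot_for_read(root, search_key, path, 0, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found_key,
				      path->slots[0]);

	btrfs_free_path(path);
	return ret;
}
#endif
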
3136 /*
3137 * adjust the pointers going up the tree, starting at level,
3138 * making sure the key at the path slot of each node points to 'key'.
3139 * This is used after shifting pointers to the left, so it stops
3140 * fixing up pointers when a given leaf/node is not in slot 0 of the
3141 * higher levels
3142 *
3143 */
3144 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
3145 struct btrfs_disk_key *key, int level)
3146 {
3147 int i;
3148 struct extent_buffer *t;
3149
3150 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3151 int tslot = path->slots[i];
3152 if (!path->nodes[i])
3153 break;
3154 t = path->nodes[i];
3155 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
3156 btrfs_set_node_key(t, key, tslot);
3157 btrfs_mark_buffer_dirty(path->nodes[i]);
3158 if (tslot != 0)
3159 break;
3160 }
3161 }
3162
3163 /*
3164 * update item key.
3165 *
3166 * This function isn't completely safe. It's the caller's responsibility
3167 * to ensure that the new key won't break the key ordering
3168 */
3169 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
3170 struct btrfs_key *new_key)
3171 {
3172 struct btrfs_disk_key disk_key;
3173 struct extent_buffer *eb;
3174 int slot;
3175
3176 eb = path->nodes[0];
3177 slot = path->slots[0];
3178 if (slot > 0) {
3179 btrfs_item_key(eb, &disk_key, slot - 1);
3180 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3181 }
3182 if (slot < btrfs_header_nritems(eb) - 1) {
3183 btrfs_item_key(eb, &disk_key, slot + 1);
3184 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3185 }
3186
3187 btrfs_cpu_key_to_disk(&disk_key, new_key);
3188 btrfs_set_item_key(eb, &disk_key, slot);
3189 btrfs_mark_buffer_dirty(eb);
3190 if (slot == 0)
3191 fixup_low_keys(root, path, &disk_key, 1);
3192 }
3193
3194 /*
3195 * try to push data from one node into the next node left in the
3196 * tree.
3197 *
3198 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3199 * error, and > 0 if there was no room in the left hand block.
3200 */
3201 static int push_node_left(struct btrfs_trans_handle *trans,
3202 struct btrfs_root *root, struct extent_buffer *dst,
3203 struct extent_buffer *src, int empty)
3204 {
3205 int push_items = 0;
3206 int src_nritems;
3207 int dst_nritems;
3208 int ret = 0;
3209
3210 src_nritems = btrfs_header_nritems(src);
3211 dst_nritems = btrfs_header_nritems(dst);
3212 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3213 WARN_ON(btrfs_header_generation(src) != trans->transid);
3214 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3215
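	/*
	 * unless we are emptying src, don't bother pushing from a node
	 * that has only a handful of pointers left
	 */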
3216 if (!empty && src_nritems <= 8)
3217 return 1;
3218
3219 if (push_items <= 0)
3220 return 1;
3221
3222 if (empty) {
3223 push_items = min(src_nritems, push_items);
3224 if (push_items < src_nritems) {
3225 /* leave at least 8 pointers in the node if
3226 * we aren't going to empty it
3227 */
3228 if (src_nritems - push_items < 8) {
3229 if (push_items <= 8)
3230 return 1;
3231 push_items -= 8;
3232 }
3233 }
3234 } else
3235 push_items = min(src_nritems - 8, push_items);
3236
3237 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3238 push_items);
3239 if (ret) {
3240 btrfs_abort_transaction(trans, root, ret);
3241 return ret;
3242 }
3243 copy_extent_buffer(dst, src,
3244 btrfs_node_key_ptr_offset(dst_nritems),
3245 btrfs_node_key_ptr_offset(0),
3246 push_items * sizeof(struct btrfs_key_ptr));
3247
3248 if (push_items < src_nritems) {
3249 /*
3250 * don't call tree_mod_log_eb_move here, key removal was already
3251 * fully logged by tree_mod_log_eb_copy above.
3252 */
3253 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3254 btrfs_node_key_ptr_offset(push_items),
3255 (src_nritems - push_items) *
3256 sizeof(struct btrfs_key_ptr));
3257 }
3258 btrfs_set_header_nritems(src, src_nritems - push_items);
3259 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3260 btrfs_mark_buffer_dirty(src);
3261 btrfs_mark_buffer_dirty(dst);
3262
3263 return ret;
3264 }
3265
3266 /*
3267 * try to push data from one node into the next node right in the
3268 * tree.
3269 *
3270 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3271 * error, and > 0 if there was no room in the right hand block.
3272 *
3273 * this will only push up to 1/2 the contents of the left node over
3274 */
3275 static int balance_node_right(struct btrfs_trans_handle *trans,
3276 struct btrfs_root *root,
3277 struct extent_buffer *dst,
3278 struct extent_buffer *src)
3279 {
3280 int push_items = 0;
3281 int max_push;
3282 int src_nritems;
3283 int dst_nritems;
3284 int ret = 0;
3285
3286 WARN_ON(btrfs_header_generation(src) != trans->transid);
3287 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3288
3289 src_nritems = btrfs_header_nritems(src);
3290 dst_nritems = btrfs_header_nritems(dst);
3291 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3292 if (push_items <= 0)
3293 return 1;
3294
3295 if (src_nritems < 4)
3296 return 1;
3297
3298 max_push = src_nritems / 2 + 1;
3299 /* don't try to empty the node */
3300 if (max_push >= src_nritems)
3301 return 1;
3302
3303 if (max_push < push_items)
3304 push_items = max_push;
3305
3306 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3307 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3308 btrfs_node_key_ptr_offset(0),
3309 (dst_nritems) *
3310 sizeof(struct btrfs_key_ptr));
3311
3312 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3313 src_nritems - push_items, push_items);
3314 if (ret) {
3315 btrfs_abort_transaction(trans, root, ret);
3316 return ret;
3317 }
3318 copy_extent_buffer(dst, src,
3319 btrfs_node_key_ptr_offset(0),
3320 btrfs_node_key_ptr_offset(src_nritems - push_items),
3321 push_items * sizeof(struct btrfs_key_ptr));
3322
3323 btrfs_set_header_nritems(src, src_nritems - push_items);
3324 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3325
3326 btrfs_mark_buffer_dirty(src);
3327 btrfs_mark_buffer_dirty(dst);
3328
3329 return ret;
3330 }
3331
3332 /*
3333 * helper function to insert a new root level in the tree.
3334 * A new node is allocated, and a single item is inserted to
3335 * point to the existing root
3336 *
3337 * returns zero on success or < 0 on failure.
3338 */
3339 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3340 struct btrfs_root *root,
3341 struct btrfs_path *path, int level)
3342 {
3343 u64 lower_gen;
3344 struct extent_buffer *lower;
3345 struct extent_buffer *c;
3346 struct extent_buffer *old;
3347 struct btrfs_disk_key lower_key;
3348
3349 BUG_ON(path->nodes[level]);
3350 BUG_ON(path->nodes[level-1] != root->node);
3351
3352 lower = path->nodes[level-1];
3353 if (level == 1)
3354 btrfs_item_key(lower, &lower_key, 0);
3355 else
3356 btrfs_node_key(lower, &lower_key, 0);
3357
3358 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3359 root->root_key.objectid, &lower_key,
3360 level, root->node->start, 0);
3361 if (IS_ERR(c))
3362 return PTR_ERR(c);
3363
3364 root_add_used(root, root->nodesize);
3365
3366 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3367 btrfs_set_header_nritems(c, 1);
3368 btrfs_set_header_level(c, level);
3369 btrfs_set_header_bytenr(c, c->start);
3370 btrfs_set_header_generation(c, trans->transid);
3371 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3372 btrfs_set_header_owner(c, root->root_key.objectid);
3373
3374 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3375 BTRFS_FSID_SIZE);
3376
3377 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3378 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3379
3380 btrfs_set_node_key(c, &lower_key, 0);
3381 btrfs_set_node_blockptr(c, 0, lower->start);
3382 lower_gen = btrfs_header_generation(lower);
3383 WARN_ON(lower_gen != trans->transid);
3384
3385 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3386
3387 btrfs_mark_buffer_dirty(c);
3388
3389 old = root->node;
3390 tree_mod_log_set_root_pointer(root, c, 0);
3391 rcu_assign_pointer(root->node, c);
3392
3393 /* the super has an extra ref to root->node */
3394 free_extent_buffer(old);
3395
3396 add_root_to_dirty_list(root);
3397 extent_buffer_get(c);
3398 path->nodes[level] = c;
3399 path->locks[level] = BTRFS_WRITE_LOCK;
3400 path->slots[level] = 0;
3401 return 0;
3402 }
3403
3404 /*
3405 * worker function to insert a single pointer in a node.
3406 * the node should have enough room for the pointer already
3407 *
3408 * slot and level indicate where you want the key to go, and
3409 * bytenr is the block the key points to.
3410 */
3411 static void insert_ptr(struct btrfs_trans_handle *trans,
3412 struct btrfs_root *root, struct btrfs_path *path,
3413 struct btrfs_disk_key *key, u64 bytenr,
3414 int slot, int level)
3415 {
3416 struct extent_buffer *lower;
3417 int nritems;
3418 int ret;
3419
3420 BUG_ON(!path->nodes[level]);
3421 btrfs_assert_tree_locked(path->nodes[level]);
3422 lower = path->nodes[level];
3423 nritems = btrfs_header_nritems(lower);
3424 BUG_ON(slot > nritems);
3425 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3426 if (slot != nritems) {
3427 if (level)
3428 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3429 slot, nritems - slot);
3430 memmove_extent_buffer(lower,
3431 btrfs_node_key_ptr_offset(slot + 1),
3432 btrfs_node_key_ptr_offset(slot),
3433 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3434 }
3435 if (level) {
3436 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3437 MOD_LOG_KEY_ADD, GFP_NOFS);
3438 BUG_ON(ret < 0);
3439 }
3440 btrfs_set_node_key(lower, key, slot);
3441 btrfs_set_node_blockptr(lower, slot, bytenr);
3442 WARN_ON(trans->transid == 0);
3443 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3444 btrfs_set_header_nritems(lower, nritems + 1);
3445 btrfs_mark_buffer_dirty(lower);
3446 }
3447
3448 /*
3449 * split the node at the specified level in path in two.
3450 * The path is corrected to point to the appropriate node after the split
3451 *
3452 * Before splitting, this tries to make some room in the node by pushing
3453 * left and right; if either one works, it returns right away.
3454 *
3455 * returns 0 on success and < 0 on failure
3456 */
3457 static noinline int split_node(struct btrfs_trans_handle *trans,
3458 struct btrfs_root *root,
3459 struct btrfs_path *path, int level)
3460 {
3461 struct extent_buffer *c;
3462 struct extent_buffer *split;
3463 struct btrfs_disk_key disk_key;
3464 int mid;
3465 int ret;
3466 u32 c_nritems;
3467
3468 c = path->nodes[level];
3469 WARN_ON(btrfs_header_generation(c) != trans->transid);
3470 if (c == root->node) {
3471 /*
3472 * trying to split the root, let's make a new one
3473 *
3474 * tree mod log: we don't log the removal of the old root in
3475 * insert_new_root, because that root buffer will be kept as a
3476 * normal node. We are going to log removal of half of the
3477 * elements below with tree_mod_log_eb_copy. We're holding a
3478 * tree lock on the buffer, which is why we cannot race with
3479 * other tree_mod_log users.
3480 */
3481 ret = insert_new_root(trans, root, path, level + 1);
3482 if (ret)
3483 return ret;
3484 } else {
3485 ret = push_nodes_for_insert(trans, root, path, level);
3486 c = path->nodes[level];
3487 if (!ret && btrfs_header_nritems(c) <
3488 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3489 return 0;
3490 if (ret < 0)
3491 return ret;
3492 }
3493
3494 c_nritems = btrfs_header_nritems(c);
3495 mid = (c_nritems + 1) / 2;
3496 btrfs_node_key(c, &disk_key, mid);
3497
3498 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3499 root->root_key.objectid,
3500 &disk_key, level, c->start, 0);
3501 if (IS_ERR(split))
3502 return PTR_ERR(split);
3503
3504 root_add_used(root, root->nodesize);
3505
3506 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3507 btrfs_set_header_level(split, btrfs_header_level(c));
3508 btrfs_set_header_bytenr(split, split->start);
3509 btrfs_set_header_generation(split, trans->transid);
3510 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3511 btrfs_set_header_owner(split, root->root_key.objectid);
3512 write_extent_buffer(split, root->fs_info->fsid,
3513 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3514 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3515 btrfs_header_chunk_tree_uuid(split),
3516 BTRFS_UUID_SIZE);
3517
3518 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3519 mid, c_nritems - mid);
3520 if (ret) {
3521 btrfs_abort_transaction(trans, root, ret);
3522 return ret;
3523 }
3524 copy_extent_buffer(split, c,
3525 btrfs_node_key_ptr_offset(0),
3526 btrfs_node_key_ptr_offset(mid),
3527 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3528 btrfs_set_header_nritems(split, c_nritems - mid);
3529 btrfs_set_header_nritems(c, mid);
3530 ret = 0;
3531
3532 btrfs_mark_buffer_dirty(c);
3533 btrfs_mark_buffer_dirty(split);
3534
3535 insert_ptr(trans, root, path, &disk_key, split->start,
3536 path->slots[level + 1] + 1, level + 1);
3537
3538 if (path->slots[level] >= mid) {
3539 path->slots[level] -= mid;
3540 btrfs_tree_unlock(c);
3541 free_extent_buffer(c);
3542 path->nodes[level] = split;
3543 path->slots[level + 1] += 1;
3544 } else {
3545 btrfs_tree_unlock(split);
3546 free_extent_buffer(split);
3547 }
3548 return ret;
3549 }
3550
3551 /*
3552 * how many bytes are required to store the items in a leaf. start
3553 * and nr indicate which items in the leaf to check. This totals up the
3554 * space used both by the item structs and the item data
3555 */
3556 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3557 {
3558 struct btrfs_item *start_item;
3559 struct btrfs_item *end_item;
3560 struct btrfs_map_token token;
3561 int data_len;
3562 int nritems = btrfs_header_nritems(l);
3563 int end = min(nritems, start + nr) - 1;
3564
3565 if (!nr)
3566 return 0;
3567 btrfs_init_map_token(&token);
3568 start_item = btrfs_item_nr(start);
3569 end_item = btrfs_item_nr(end);
3570 data_len = btrfs_token_item_offset(l, start_item, &token) +
3571 btrfs_token_item_size(l, start_item, &token);
3572 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3573 data_len += sizeof(struct btrfs_item) * nr;
3574 WARN_ON(data_len < 0);
3575 return data_len;
3576 }
3577
3578 /*
3579 * The space between the end of the leaf items and
3580 * the start of the leaf data. IOW, how much room
3581 * the leaf has left for both items and data
3582 */
3583 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3584 struct extent_buffer *leaf)
3585 {
3586 int nritems = btrfs_header_nritems(leaf);
3587 int ret;
3588 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3589 if (ret < 0) {
3590 btrfs_crit(root->fs_info,
3591 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3592 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3593 leaf_space_used(leaf, 0, nritems), nritems);
3594 }
3595 return ret;
3596 }
3597
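/*
 * Note on leaf layout (context for leaf_space_used() and
 * btrfs_leaf_free_space() above): the array of struct btrfs_item headers
 * grows forward from the leaf header while the item data is packed
 * backward from the end of the block, so the usable free space is the gap
 * between the last item header and the start of the lowest item's data.
 */
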
3598 /*
3599 * min slot controls the lowest index we're willing to push to the
3600 * right. We'll push up to and including min_slot, but no lower
3601 */
3602 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3603 struct btrfs_root *root,
3604 struct btrfs_path *path,
3605 int data_size, int empty,
3606 struct extent_buffer *right,
3607 int free_space, u32 left_nritems,
3608 u32 min_slot)
3609 {
3610 struct extent_buffer *left = path->nodes[0];
3611 struct extent_buffer *upper = path->nodes[1];
3612 struct btrfs_map_token token;
3613 struct btrfs_disk_key disk_key;
3614 int slot;
3615 u32 i;
3616 int push_space = 0;
3617 int push_items = 0;
3618 struct btrfs_item *item;
3619 u32 nr;
3620 u32 right_nritems;
3621 u32 data_end;
3622 u32 this_item_size;
3623
3624 btrfs_init_map_token(&token);
3625
3626 if (empty)
3627 nr = 0;
3628 else
3629 nr = max_t(u32, 1, min_slot);
3630
3631 if (path->slots[0] >= left_nritems)
3632 push_space += data_size;
3633
3634 slot = path->slots[1];
3635 i = left_nritems - 1;
3636 while (i >= nr) {
3637 item = btrfs_item_nr(i);
3638
3639 if (!empty && push_items > 0) {
3640 if (path->slots[0] > i)
3641 break;
3642 if (path->slots[0] == i) {
3643 int space = btrfs_leaf_free_space(root, left);
3644 if (space + push_space * 2 > free_space)
3645 break;
3646 }
3647 }
3648
3649 if (path->slots[0] == i)
3650 push_space += data_size;
3651
3652 this_item_size = btrfs_item_size(left, item);
3653 if (this_item_size + sizeof(*item) + push_space > free_space)
3654 break;
3655
3656 push_items++;
3657 push_space += this_item_size + sizeof(*item);
3658 if (i == 0)
3659 break;
3660 i--;
3661 }
3662
3663 if (push_items == 0)
3664 goto out_unlock;
3665
3666 WARN_ON(!empty && push_items == left_nritems);
3667
3668 /* push left to right */
3669 right_nritems = btrfs_header_nritems(right);
3670
3671 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3672 push_space -= leaf_data_end(root, left);
3673
3674 /* make room in the right data area */
3675 data_end = leaf_data_end(root, right);
3676 memmove_extent_buffer(right,
3677 btrfs_leaf_data(right) + data_end - push_space,
3678 btrfs_leaf_data(right) + data_end,
3679 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3680
3681 /* copy from the left data area */
3682 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3683 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3684 btrfs_leaf_data(left) + leaf_data_end(root, left),
3685 push_space);
3686
3687 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3688 btrfs_item_nr_offset(0),
3689 right_nritems * sizeof(struct btrfs_item));
3690
3691 /* copy the items from left to right */
3692 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3693 btrfs_item_nr_offset(left_nritems - push_items),
3694 push_items * sizeof(struct btrfs_item));
3695
3696 /* update the item pointers */
3697 right_nritems += push_items;
3698 btrfs_set_header_nritems(right, right_nritems);
3699 push_space = BTRFS_LEAF_DATA_SIZE(root);
3700 for (i = 0; i < right_nritems; i++) {
3701 item = btrfs_item_nr(i);
3702 push_space -= btrfs_token_item_size(right, item, &token);
3703 btrfs_set_token_item_offset(right, item, push_space, &token);
3704 }
3705
3706 left_nritems -= push_items;
3707 btrfs_set_header_nritems(left, left_nritems);
3708
3709 if (left_nritems)
3710 btrfs_mark_buffer_dirty(left);
3711 else
3712 clean_tree_block(trans, root, left);
3713
3714 btrfs_mark_buffer_dirty(right);
3715
3716 btrfs_item_key(right, &disk_key, 0);
3717 btrfs_set_node_key(upper, &disk_key, slot + 1);
3718 btrfs_mark_buffer_dirty(upper);
3719
3720 /* then fixup the leaf pointer in the path */
3721 if (path->slots[0] >= left_nritems) {
3722 path->slots[0] -= left_nritems;
3723 if (btrfs_header_nritems(path->nodes[0]) == 0)
3724 clean_tree_block(trans, root, path->nodes[0]);
3725 btrfs_tree_unlock(path->nodes[0]);
3726 free_extent_buffer(path->nodes[0]);
3727 path->nodes[0] = right;
3728 path->slots[1] += 1;
3729 } else {
3730 btrfs_tree_unlock(right);
3731 free_extent_buffer(right);
3732 }
3733 return 0;
3734
3735 out_unlock:
3736 btrfs_tree_unlock(right);
3737 free_extent_buffer(right);
3738 return 1;
3739 }
3740
3741 /*
3742 * push some data in the path leaf to the right, trying to free up at
3743 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3744 *
3745 * returns 1 if the push failed because the other node didn't have enough
3746 * room, 0 if everything worked out and < 0 if there were major errors.
3747 *
3748 * this will push starting from min_slot to the end of the leaf. It won't
3749 * push any slot lower than min_slot
3750 */
3751 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3752 *root, struct btrfs_path *path,
3753 int min_data_size, int data_size,
3754 int empty, u32 min_slot)
3755 {
3756 struct extent_buffer *left = path->nodes[0];
3757 struct extent_buffer *right;
3758 struct extent_buffer *upper;
3759 int slot;
3760 int free_space;
3761 u32 left_nritems;
3762 int ret;
3763
3764 if (!path->nodes[1])
3765 return 1;
3766
3767 slot = path->slots[1];
3768 upper = path->nodes[1];
3769 if (slot >= btrfs_header_nritems(upper) - 1)
3770 return 1;
3771
3772 btrfs_assert_tree_locked(path->nodes[1]);
3773
3774 right = read_node_slot(root, upper, slot + 1);
3775 if (right == NULL)
3776 return 1;
3777
3778 btrfs_tree_lock(right);
3779 btrfs_set_lock_blocking(right);
3780
3781 free_space = btrfs_leaf_free_space(root, right);
3782 if (free_space < data_size)
3783 goto out_unlock;
3784
3785 /* cow and double check */
3786 ret = btrfs_cow_block(trans, root, right, upper,
3787 slot + 1, &right);
3788 if (ret)
3789 goto out_unlock;
3790
3791 free_space = btrfs_leaf_free_space(root, right);
3792 if (free_space < data_size)
3793 goto out_unlock;
3794
3795 left_nritems = btrfs_header_nritems(left);
3796 if (left_nritems == 0)
3797 goto out_unlock;
3798
3799 if (path->slots[0] == left_nritems && !empty) {
3800 /* Key greater than all keys in the leaf, right neighbor has
3801 * enough room for it and we're not emptying our leaf to delete
3802 * it, therefore use right neighbor to insert the new item and
3803 * no need to touch/dirty our left leaf. */
3804 btrfs_tree_unlock(left);
3805 free_extent_buffer(left);
3806 path->nodes[0] = right;
3807 path->slots[0] = 0;
3808 path->slots[1]++;
3809 return 0;
3810 }
3811
3812 return __push_leaf_right(trans, root, path, min_data_size, empty,
3813 right, free_space, left_nritems, min_slot);
3814 out_unlock:
3815 btrfs_tree_unlock(right);
3816 free_extent_buffer(right);
3817 return 1;
3818 }
3819
3820 /*
3821 * push some data in the path leaf to the left, trying to free up at
3822 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3823 *
3824 * max_slot can put a limit on how far into the leaf we'll push items. The
3825 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3826 * items
3827 */
3828 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3829 struct btrfs_root *root,
3830 struct btrfs_path *path, int data_size,
3831 int empty, struct extent_buffer *left,
3832 int free_space, u32 right_nritems,
3833 u32 max_slot)
3834 {
3835 struct btrfs_disk_key disk_key;
3836 struct extent_buffer *right = path->nodes[0];
3837 int i;
3838 int push_space = 0;
3839 int push_items = 0;
3840 struct btrfs_item *item;
3841 u32 old_left_nritems;
3842 u32 nr;
3843 int ret = 0;
3844 u32 this_item_size;
3845 u32 old_left_item_size;
3846 struct btrfs_map_token token;
3847
3848 btrfs_init_map_token(&token);
3849
3850 if (empty)
3851 nr = min(right_nritems, max_slot);
3852 else
3853 nr = min(right_nritems - 1, max_slot);
3854
3855 for (i = 0; i < nr; i++) {
3856 item = btrfs_item_nr(i);
3857
3858 if (!empty && push_items > 0) {
3859 if (path->slots[0] < i)
3860 break;
3861 if (path->slots[0] == i) {
3862 int space = btrfs_leaf_free_space(root, right);
3863 if (space + push_space * 2 > free_space)
3864 break;
3865 }
3866 }
3867
3868 if (path->slots[0] == i)
3869 push_space += data_size;
3870
3871 this_item_size = btrfs_item_size(right, item);
3872 if (this_item_size + sizeof(*item) + push_space > free_space)
3873 break;
3874
3875 push_items++;
3876 push_space += this_item_size + sizeof(*item);
3877 }
3878
3879 if (push_items == 0) {
3880 ret = 1;
3881 goto out;
3882 }
3883 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3884
3885 /* push data from right to left */
3886 copy_extent_buffer(left, right,
3887 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3888 btrfs_item_nr_offset(0),
3889 push_items * sizeof(struct btrfs_item));
3890
3891 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3892 btrfs_item_offset_nr(right, push_items - 1);
3893
3894 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3895 leaf_data_end(root, left) - push_space,
3896 btrfs_leaf_data(right) +
3897 btrfs_item_offset_nr(right, push_items - 1),
3898 push_space);
3899 old_left_nritems = btrfs_header_nritems(left);
3900 BUG_ON(old_left_nritems <= 0);
3901
3902 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3903 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3904 u32 ioff;
3905
3906 item = btrfs_item_nr(i);
3907
3908 ioff = btrfs_token_item_offset(left, item, &token);
3909 btrfs_set_token_item_offset(left, item,
3910 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3911 &token);
3912 }
3913 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3914
3915 /* fixup right node */
3916 if (push_items > right_nritems)
3917 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3918 right_nritems);
3919
3920 if (push_items < right_nritems) {
3921 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3922 leaf_data_end(root, right);
3923 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3924 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3925 btrfs_leaf_data(right) +
3926 leaf_data_end(root, right), push_space);
3927
3928 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3929 btrfs_item_nr_offset(push_items),
3930 (btrfs_header_nritems(right) - push_items) *
3931 sizeof(struct btrfs_item));
3932 }
3933 right_nritems -= push_items;
3934 btrfs_set_header_nritems(right, right_nritems);
3935 push_space = BTRFS_LEAF_DATA_SIZE(root);
3936 for (i = 0; i < right_nritems; i++) {
3937 item = btrfs_item_nr(i);
3938
3939 push_space = push_space - btrfs_token_item_size(right,
3940 item, &token);
3941 btrfs_set_token_item_offset(right, item, push_space, &token);
3942 }
3943
3944 btrfs_mark_buffer_dirty(left);
3945 if (right_nritems)
3946 btrfs_mark_buffer_dirty(right);
3947 else
3948 clean_tree_block(trans, root, right);
3949
3950 btrfs_item_key(right, &disk_key, 0);
3951 fixup_low_keys(root, path, &disk_key, 1);
3952
3953 /* then fixup the leaf pointer in the path */
3954 if (path->slots[0] < push_items) {
3955 path->slots[0] += old_left_nritems;
3956 btrfs_tree_unlock(path->nodes[0]);
3957 free_extent_buffer(path->nodes[0]);
3958 path->nodes[0] = left;
3959 path->slots[1] -= 1;
3960 } else {
3961 btrfs_tree_unlock(left);
3962 free_extent_buffer(left);
3963 path->slots[0] -= push_items;
3964 }
3965 BUG_ON(path->slots[0] < 0);
3966 return ret;
3967 out:
3968 btrfs_tree_unlock(left);
3969 free_extent_buffer(left);
3970 return ret;
3971 }
3972
3973 /*
3974 * push some data in the path leaf to the left, trying to free up at
3975 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3976 *
3977 * max_slot can put a limit on how far into the leaf we'll push items. The
3978 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3979 * items
3980 */
3981 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3982 *root, struct btrfs_path *path, int min_data_size,
3983 int data_size, int empty, u32 max_slot)
3984 {
3985 struct extent_buffer *right = path->nodes[0];
3986 struct extent_buffer *left;
3987 int slot;
3988 int free_space;
3989 u32 right_nritems;
3990 int ret = 0;
3991
3992 slot = path->slots[1];
3993 if (slot == 0)
3994 return 1;
3995 if (!path->nodes[1])
3996 return 1;
3997
3998 right_nritems = btrfs_header_nritems(right);
3999 if (right_nritems == 0)
4000 return 1;
4001
4002 btrfs_assert_tree_locked(path->nodes[1]);
4003
4004 left = read_node_slot(root, path->nodes[1], slot - 1);
4005 if (left == NULL)
4006 return 1;
4007
4008 btrfs_tree_lock(left);
4009 btrfs_set_lock_blocking(left);
4010
4011 free_space = btrfs_leaf_free_space(root, left);
4012 if (free_space < data_size) {
4013 ret = 1;
4014 goto out;
4015 }
4016
4017 /* cow and double check */
4018 ret = btrfs_cow_block(trans, root, left,
4019 path->nodes[1], slot - 1, &left);
4020 if (ret) {
4021 /* we hit -ENOSPC, but it isn't fatal here */
4022 if (ret == -ENOSPC)
4023 ret = 1;
4024 goto out;
4025 }
4026
4027 free_space = btrfs_leaf_free_space(root, left);
4028 if (free_space < data_size) {
4029 ret = 1;
4030 goto out;
4031 }
4032
4033 return __push_leaf_left(trans, root, path, min_data_size,
4034 empty, left, free_space, right_nritems,
4035 max_slot);
4036 out:
4037 btrfs_tree_unlock(left);
4038 free_extent_buffer(left);
4039 return ret;
4040 }
4041
4042 /*
4043 * split the path's leaf in two, making sure there is at least data_size
4044 * available for the resulting leaf level of the path.
4045 */
4046 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4047 struct btrfs_root *root,
4048 struct btrfs_path *path,
4049 struct extent_buffer *l,
4050 struct extent_buffer *right,
4051 int slot, int mid, int nritems)
4052 {
4053 int data_copy_size;
4054 int rt_data_off;
4055 int i;
4056 struct btrfs_disk_key disk_key;
4057 struct btrfs_map_token token;
4058
4059 btrfs_init_map_token(&token);
4060
4061 nritems = nritems - mid;
4062 btrfs_set_header_nritems(right, nritems);
4063 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4064
4065 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4066 btrfs_item_nr_offset(mid),
4067 nritems * sizeof(struct btrfs_item));
4068
4069 copy_extent_buffer(right, l,
4070 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4071 data_copy_size, btrfs_leaf_data(l) +
4072 leaf_data_end(root, l), data_copy_size);
4073
4074 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4075 btrfs_item_end_nr(l, mid);
4076
4077 for (i = 0; i < nritems; i++) {
4078 struct btrfs_item *item = btrfs_item_nr(i);
4079 u32 ioff;
4080
4081 ioff = btrfs_token_item_offset(right, item, &token);
4082 btrfs_set_token_item_offset(right, item,
4083 ioff + rt_data_off, &token);
4084 }
4085
4086 btrfs_set_header_nritems(l, mid);
4087 btrfs_item_key(right, &disk_key, 0);
4088 insert_ptr(trans, root, path, &disk_key, right->start,
4089 path->slots[1] + 1, 1);
4090
4091 btrfs_mark_buffer_dirty(right);
4092 btrfs_mark_buffer_dirty(l);
4093 BUG_ON(path->slots[0] != slot);
4094
4095 if (mid <= slot) {
4096 btrfs_tree_unlock(path->nodes[0]);
4097 free_extent_buffer(path->nodes[0]);
4098 path->nodes[0] = right;
4099 path->slots[0] -= mid;
4100 path->slots[1] += 1;
4101 } else {
4102 btrfs_tree_unlock(right);
4103 free_extent_buffer(right);
4104 }
4105
4106 BUG_ON(path->slots[0] < 0);
4107 }
4108
4109 /*
4110 * double splits happen when we need to insert a big item in the middle
4111 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4112 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4113 * A B C
4114 *
4115 * We avoid this by trying to push the items on either side of our target
4116 * into the adjacent leaves. If all goes well we can avoid the double split
4117 * completely.
4118 */
4119 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4120 struct btrfs_root *root,
4121 struct btrfs_path *path,
4122 int data_size)
4123 {
4124 int ret;
4125 int progress = 0;
4126 int slot;
4127 u32 nritems;
4128 int space_needed = data_size;
4129
4130 slot = path->slots[0];
4131 if (slot < btrfs_header_nritems(path->nodes[0]))
4132 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4133
4134 /*
4135 * try to push all the items after our slot into the
4136 * right leaf
4137 */
4138 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4139 if (ret < 0)
4140 return ret;
4141
4142 if (ret == 0)
4143 progress++;
4144
4145 nritems = btrfs_header_nritems(path->nodes[0]);
4146 /*
4147 * our goal is to get our slot at the start or end of a leaf. If
4148 * we've done so we're done
4149 */
4150 if (path->slots[0] == 0 || path->slots[0] == nritems)
4151 return 0;
4152
4153 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4154 return 0;
4155
4156 /* try to push all the items before our slot into the left leaf */
4157 slot = path->slots[0];
4158 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4159 if (ret < 0)
4160 return ret;
4161
4162 if (ret == 0)
4163 progress++;
4164
4165 if (progress)
4166 return 0;
4167 return 1;
4168 }
4169
4170 /*
4171 * split the path's leaf in two, making sure there is at least data_size
4172 * available for the resulting leaf level of the path.
4173 *
4174 * returns 0 if all went well and < 0 on failure.
4175 */
4176 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4177 struct btrfs_root *root,
4178 struct btrfs_key *ins_key,
4179 struct btrfs_path *path, int data_size,
4180 int extend)
4181 {
4182 struct btrfs_disk_key disk_key;
4183 struct extent_buffer *l;
4184 u32 nritems;
4185 int mid;
4186 int slot;
4187 struct extent_buffer *right;
4188 int ret = 0;
4189 int wret;
4190 int split;
4191 int num_doubles = 0;
4192 int tried_avoid_double = 0;
4193
4194 l = path->nodes[0];
4195 slot = path->slots[0];
4196 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4197 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4198 return -EOVERFLOW;
4199
4200 /* first try to make some room by pushing left and right */
4201 if (data_size && path->nodes[1]) {
4202 int space_needed = data_size;
4203
4204 if (slot < btrfs_header_nritems(l))
4205 space_needed -= btrfs_leaf_free_space(root, l);
4206
4207 wret = push_leaf_right(trans, root, path, space_needed,
4208 space_needed, 0, 0);
4209 if (wret < 0)
4210 return wret;
4211 if (wret) {
4212 wret = push_leaf_left(trans, root, path, space_needed,
4213 space_needed, 0, (u32)-1);
4214 if (wret < 0)
4215 return wret;
4216 }
4217 l = path->nodes[0];
4218
4219 /* did the pushes work? */
4220 if (btrfs_leaf_free_space(root, l) >= data_size)
4221 return 0;
4222 }
4223
4224 if (!path->nodes[1]) {
4225 ret = insert_new_root(trans, root, path, 1);
4226 if (ret)
4227 return ret;
4228 }
4229 again:
4230 split = 1;
4231 l = path->nodes[0];
4232 slot = path->slots[0];
4233 nritems = btrfs_header_nritems(l);
4234 mid = (nritems + 1) / 2;
4235
4236 if (mid <= slot) {
4237 if (nritems == 1 ||
4238 leaf_space_used(l, mid, nritems - mid) + data_size >
4239 BTRFS_LEAF_DATA_SIZE(root)) {
4240 if (slot >= nritems) {
4241 split = 0;
4242 } else {
4243 mid = slot;
4244 if (mid != nritems &&
4245 leaf_space_used(l, mid, nritems - mid) +
4246 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4247 if (data_size && !tried_avoid_double)
4248 goto push_for_double;
4249 split = 2;
4250 }
4251 }
4252 }
4253 } else {
4254 if (leaf_space_used(l, 0, mid) + data_size >
4255 BTRFS_LEAF_DATA_SIZE(root)) {
4256 if (!extend && data_size && slot == 0) {
4257 split = 0;
4258 } else if ((extend || !data_size) && slot == 0) {
4259 mid = 1;
4260 } else {
4261 mid = slot;
4262 if (mid != nritems &&
4263 leaf_space_used(l, mid, nritems - mid) +
4264 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4265 if (data_size && !tried_avoid_double)
4266 goto push_for_double;
4267 split = 2;
4268 }
4269 }
4270 }
4271 }
4272
4273 if (split == 0)
4274 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4275 else
4276 btrfs_item_key(l, &disk_key, mid);
4277
4278 right = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
4279 root->root_key.objectid,
4280 &disk_key, 0, l->start, 0);
4281 if (IS_ERR(right))
4282 return PTR_ERR(right);
4283
4284 root_add_used(root, root->nodesize);
4285
4286 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4287 btrfs_set_header_bytenr(right, right->start);
4288 btrfs_set_header_generation(right, trans->transid);
4289 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4290 btrfs_set_header_owner(right, root->root_key.objectid);
4291 btrfs_set_header_level(right, 0);
4292 write_extent_buffer(right, root->fs_info->fsid,
4293 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4294
4295 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4296 btrfs_header_chunk_tree_uuid(right),
4297 BTRFS_UUID_SIZE);
4298
4299 if (split == 0) {
4300 if (mid <= slot) {
4301 btrfs_set_header_nritems(right, 0);
4302 insert_ptr(trans, root, path, &disk_key, right->start,
4303 path->slots[1] + 1, 1);
4304 btrfs_tree_unlock(path->nodes[0]);
4305 free_extent_buffer(path->nodes[0]);
4306 path->nodes[0] = right;
4307 path->slots[0] = 0;
4308 path->slots[1] += 1;
4309 } else {
4310 btrfs_set_header_nritems(right, 0);
4311 insert_ptr(trans, root, path, &disk_key, right->start,
4312 path->slots[1], 1);
4313 btrfs_tree_unlock(path->nodes[0]);
4314 free_extent_buffer(path->nodes[0]);
4315 path->nodes[0] = right;
4316 path->slots[0] = 0;
4317 if (path->slots[1] == 0)
4318 fixup_low_keys(root, path, &disk_key, 1);
4319 }
4320 btrfs_mark_buffer_dirty(right);
4321 return ret;
4322 }
4323
4324 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4325
4326 if (split == 2) {
4327 BUG_ON(num_doubles != 0);
4328 num_doubles++;
4329 goto again;
4330 }
4331
4332 return 0;
4333
4334 push_for_double:
4335 push_for_double_split(trans, root, path, data_size);
4336 tried_avoid_double = 1;
4337 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4338 return 0;
4339 goto again;
4340 }
4341
4342 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4343 struct btrfs_root *root,
4344 struct btrfs_path *path, int ins_len)
4345 {
4346 struct btrfs_key key;
4347 struct extent_buffer *leaf;
4348 struct btrfs_file_extent_item *fi;
4349 u64 extent_len = 0;
4350 u32 item_size;
4351 int ret;
4352
4353 leaf = path->nodes[0];
4354 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4355
4356 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4357 key.type != BTRFS_EXTENT_CSUM_KEY);
4358
4359 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4360 return 0;
4361
4362 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4363 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4364 fi = btrfs_item_ptr(leaf, path->slots[0],
4365 struct btrfs_file_extent_item);
4366 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4367 }
4368 btrfs_release_path(path);
4369
4370 path->keep_locks = 1;
4371 path->search_for_split = 1;
4372 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4373 path->search_for_split = 0;
4374 if (ret > 0)
4375 ret = -EAGAIN;
4376 if (ret < 0)
4377 goto err;
4378 ret = -EAGAIN;
4379 leaf = path->nodes[0];
4380 /* if our item isn't there or got smaller, return now */
4381 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4382 goto err;
4383 /* the leaf has changed, it now has room. return now */
4384 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4385 goto err;
4386
4387 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4388 fi = btrfs_item_ptr(leaf, path->slots[0],
4389 struct btrfs_file_extent_item);
4390 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4391 goto err;
4392 }
4393
4394 btrfs_set_path_blocking(path);
4395 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4396 if (ret)
4397 goto err;
4398
4399 path->keep_locks = 0;
4400 btrfs_unlock_up_safe(path, 1);
4401 return 0;
4402 err:
4403 path->keep_locks = 0;
4404 return ret;
4405 }
4406
4407 static noinline int split_item(struct btrfs_trans_handle *trans,
4408 struct btrfs_root *root,
4409 struct btrfs_path *path,
4410 struct btrfs_key *new_key,
4411 unsigned long split_offset)
4412 {
4413 struct extent_buffer *leaf;
4414 struct btrfs_item *item;
4415 struct btrfs_item *new_item;
4416 int slot;
4417 char *buf;
4418 u32 nritems;
4419 u32 item_size;
4420 u32 orig_offset;
4421 struct btrfs_disk_key disk_key;
4422
4423 leaf = path->nodes[0];
4424 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4425
4426 btrfs_set_path_blocking(path);
4427
4428 item = btrfs_item_nr(path->slots[0]);
4429 orig_offset = btrfs_item_offset(leaf, item);
4430 item_size = btrfs_item_size(leaf, item);
4431
4432 buf = kmalloc(item_size, GFP_NOFS);
4433 if (!buf)
4434 return -ENOMEM;
4435
4436 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4437 path->slots[0]), item_size);
4438
4439 slot = path->slots[0] + 1;
4440 nritems = btrfs_header_nritems(leaf);
4441 if (slot != nritems) {
4442 /* shift the items */
4443 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4444 btrfs_item_nr_offset(slot),
4445 (nritems - slot) * sizeof(struct btrfs_item));
4446 }
4447
4448 btrfs_cpu_key_to_disk(&disk_key, new_key);
4449 btrfs_set_item_key(leaf, &disk_key, slot);
4450
4451 new_item = btrfs_item_nr(slot);
4452
4453 btrfs_set_item_offset(leaf, new_item, orig_offset);
4454 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4455
4456 btrfs_set_item_offset(leaf, item,
4457 orig_offset + item_size - split_offset);
4458 btrfs_set_item_size(leaf, item, split_offset);
4459
4460 btrfs_set_header_nritems(leaf, nritems + 1);
4461
4462 /* write the data for the start of the original item */
4463 write_extent_buffer(leaf, buf,
4464 btrfs_item_ptr_offset(leaf, path->slots[0]),
4465 split_offset);
4466
4467 /* write the data for the new item */
4468 write_extent_buffer(leaf, buf + split_offset,
4469 btrfs_item_ptr_offset(leaf, slot),
4470 item_size - split_offset);
4471 btrfs_mark_buffer_dirty(leaf);
4472
4473 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4474 kfree(buf);
4475 return 0;
4476 }
4477
4478 /*
4479 * This function splits a single item into two items,
4480 * giving 'new_key' to the new item and splitting the
4481 * old one at split_offset (from the start of the item).
4482 *
4483 * The path may be released by this operation. After
4484 * the split, the path is pointing to the old item. The
4485 * new item is going to be in the same node as the old one.
4486 *
4487 * Note, the item being split must be small enough to live alone on
4488 * a tree block with room for one extra struct btrfs_item
4489 *
4490 * This allows us to split the item in place, keeping a lock on the
4491 * leaf the entire time.
4492 */
4493 int btrfs_split_item(struct btrfs_trans_handle *trans,
4494 struct btrfs_root *root,
4495 struct btrfs_path *path,
4496 struct btrfs_key *new_key,
4497 unsigned long split_offset)
4498 {
4499 int ret;
4500 ret = setup_leaf_for_split(trans, root, path,
4501 sizeof(struct btrfs_item));
4502 if (ret)
4503 return ret;
4504
4505 ret = split_item(trans, root, path, new_key, split_offset);
4506 return ret;
4507 }
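/*
 * Illustrative sketch of one possible caller of btrfs_split_item().  The
 * helper name, the new key offset and the error handling below are
 * hypothetical; what matters is the sequence: position the path on the
 * item with a cowing search, then hand the new key and the split byte
 * offset to btrfs_split_item().  Note that setup_leaf_for_split() above
 * only accepts EXTENT_DATA and EXTENT_CSUM keys.
 */
static int example_split_one_item(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, u64 new_offset,
				  unsigned long split_offset)
{
	struct btrfs_path *path;
	struct btrfs_key new_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* cow=1 so the leaf is writable by the time we split the item */
	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	/* the new item keeps the same objectid/type, only the offset moves */
	new_key = *key;
	new_key.offset = new_offset;
	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
out:
	btrfs_free_path(path);
	return ret;
}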
4508
4509 /*
4510 * This function duplicates an item, giving 'new_key' to the new item.
4511 * It guarantees both items live in the same tree leaf and the new item
4512 * is contiguous with the original item.
4513 *
4514 * This allows us to split a file extent in place, keeping a lock on the
4515 * leaf the entire time.
4516 */
4517 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4518 struct btrfs_root *root,
4519 struct btrfs_path *path,
4520 struct btrfs_key *new_key)
4521 {
4522 struct extent_buffer *leaf;
4523 int ret;
4524 u32 item_size;
4525
4526 leaf = path->nodes[0];
4527 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4528 ret = setup_leaf_for_split(trans, root, path,
4529 item_size + sizeof(struct btrfs_item));
4530 if (ret)
4531 return ret;
4532
4533 path->slots[0]++;
4534 setup_items_for_insert(root, path, new_key, &item_size,
4535 item_size, item_size +
4536 sizeof(struct btrfs_item), 1);
4537 leaf = path->nodes[0];
4538 memcpy_extent_buffer(leaf,
4539 btrfs_item_ptr_offset(leaf, path->slots[0]),
4540 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4541 item_size);
4542 return 0;
4543 }
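/*
 * Illustrative sketch of duplicating an item under a new key, roughly the
 * pattern used when a file extent item has to be cloned before one half
 * is trimmed.  The path is assumed to already point at the source item
 * after a cowing search; the new offset is hypothetical.  On success the
 * path points at the new copy, which shares a leaf with the original.
 */
static int example_duplicate_item(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_key *orig_key, u64 new_offset)
{
	struct btrfs_key new_key;

	new_key.objectid = orig_key->objectid;
	new_key.type = orig_key->type;
	new_key.offset = new_offset;

	return btrfs_duplicate_item(trans, root, path, &new_key);
}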
4544
4545 /*
4546 * make the item pointed to by the path smaller. new_size indicates
4547 * how small to make it, and from_end tells us if we just chop bytes
4548 * off the end of the item or if we shift the item to chop bytes off
4549 * the front.
4550 */
4551 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4552 u32 new_size, int from_end)
4553 {
4554 int slot;
4555 struct extent_buffer *leaf;
4556 struct btrfs_item *item;
4557 u32 nritems;
4558 unsigned int data_end;
4559 unsigned int old_data_start;
4560 unsigned int old_size;
4561 unsigned int size_diff;
4562 int i;
4563 struct btrfs_map_token token;
4564
4565 btrfs_init_map_token(&token);
4566
4567 leaf = path->nodes[0];
4568 slot = path->slots[0];
4569
4570 old_size = btrfs_item_size_nr(leaf, slot);
4571 if (old_size == new_size)
4572 return;
4573
4574 nritems = btrfs_header_nritems(leaf);
4575 data_end = leaf_data_end(root, leaf);
4576
4577 old_data_start = btrfs_item_offset_nr(leaf, slot);
4578
4579 size_diff = old_size - new_size;
4580
4581 BUG_ON(slot < 0);
4582 BUG_ON(slot >= nritems);
4583
4584 /*
4585 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4586 */
4587 /* first correct the data pointers */
4588 for (i = slot; i < nritems; i++) {
4589 u32 ioff;
4590 item = btrfs_item_nr(i);
4591
4592 ioff = btrfs_token_item_offset(leaf, item, &token);
4593 btrfs_set_token_item_offset(leaf, item,
4594 ioff + size_diff, &token);
4595 }
4596
4597 /* shift the data */
4598 if (from_end) {
4599 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4600 data_end + size_diff, btrfs_leaf_data(leaf) +
4601 data_end, old_data_start + new_size - data_end);
4602 } else {
4603 struct btrfs_disk_key disk_key;
4604 u64 offset;
4605
4606 btrfs_item_key(leaf, &disk_key, slot);
4607
4608 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4609 unsigned long ptr;
4610 struct btrfs_file_extent_item *fi;
4611
4612 fi = btrfs_item_ptr(leaf, slot,
4613 struct btrfs_file_extent_item);
4614 fi = (struct btrfs_file_extent_item *)(
4615 (unsigned long)fi - size_diff);
4616
4617 if (btrfs_file_extent_type(leaf, fi) ==
4618 BTRFS_FILE_EXTENT_INLINE) {
4619 ptr = btrfs_item_ptr_offset(leaf, slot);
4620 memmove_extent_buffer(leaf, ptr,
4621 (unsigned long)fi,
4622 offsetof(struct btrfs_file_extent_item,
4623 disk_bytenr));
4624 }
4625 }
4626
4627 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4628 data_end + size_diff, btrfs_leaf_data(leaf) +
4629 data_end, old_data_start - data_end);
4630
4631 offset = btrfs_disk_key_offset(&disk_key);
4632 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4633 btrfs_set_item_key(leaf, &disk_key, slot);
4634 if (slot == 0)
4635 fixup_low_keys(root, path, &disk_key, 1);
4636 }
4637
4638 item = btrfs_item_nr(slot);
4639 btrfs_set_item_size(leaf, item, new_size);
4640 btrfs_mark_buffer_dirty(leaf);
4641
4642 if (btrfs_leaf_free_space(root, leaf) < 0) {
4643 btrfs_print_leaf(root, leaf);
4644 BUG();
4645 }
4646 }
4647
4648 /*
4649 * make the item pointed to by the path bigger, data_size is the added size.
4650 */
4651 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4652 u32 data_size)
4653 {
4654 int slot;
4655 struct extent_buffer *leaf;
4656 struct btrfs_item *item;
4657 u32 nritems;
4658 unsigned int data_end;
4659 unsigned int old_data;
4660 unsigned int old_size;
4661 int i;
4662 struct btrfs_map_token token;
4663
4664 btrfs_init_map_token(&token);
4665
4666 leaf = path->nodes[0];
4667
4668 nritems = btrfs_header_nritems(leaf);
4669 data_end = leaf_data_end(root, leaf);
4670
4671 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4672 btrfs_print_leaf(root, leaf);
4673 BUG();
4674 }
4675 slot = path->slots[0];
4676 old_data = btrfs_item_end_nr(leaf, slot);
4677
4678 BUG_ON(slot < 0);
4679 if (slot >= nritems) {
4680 btrfs_print_leaf(root, leaf);
4681 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4682 slot, nritems);
4683 BUG_ON(1);
4684 }
4685
4686 /*
4687 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4688 */
4689 /* first correct the data pointers */
4690 for (i = slot; i < nritems; i++) {
4691 u32 ioff;
4692 item = btrfs_item_nr(i);
4693
4694 ioff = btrfs_token_item_offset(leaf, item, &token);
4695 btrfs_set_token_item_offset(leaf, item,
4696 ioff - data_size, &token);
4697 }
4698
4699 /* shift the data */
4700 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4701 data_end - data_size, btrfs_leaf_data(leaf) +
4702 data_end, old_data - data_end);
4703
4704 data_end = old_data;
4705 old_size = btrfs_item_size_nr(leaf, slot);
4706 item = btrfs_item_nr(slot);
4707 btrfs_set_item_size(leaf, item, old_size + data_size);
4708 btrfs_mark_buffer_dirty(leaf);
4709
4710 if (btrfs_leaf_free_space(root, leaf) < 0) {
4711 btrfs_print_leaf(root, leaf);
4712 BUG();
4713 }
4714 }
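/*
 * Illustrative sketch of growing an item in place and filling the new
 * space, similar in spirit to how checksum items are extended.  The path
 * is assumed to point at a cowed, locked leaf with enough free space
 * (btrfs_extend_item() BUGs otherwise); the payload and 'extra' size are
 * hypothetical.  The mirror operation, btrfs_truncate_item(), shrinks an
 * item from either end instead.
 */
static void example_grow_item(struct btrfs_root *root,
			      struct btrfs_path *path,
			      const void *data, u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	unsigned long ptr;

	btrfs_extend_item(root, path, extra);

	/* append the new bytes after the existing payload */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	write_extent_buffer(leaf, data, ptr + old_size, extra);
	btrfs_mark_buffer_dirty(leaf);
}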
4715
4716 /*
4717 * this is a helper for btrfs_insert_empty_items, the main goal here is
4718 * to save stack depth by doing the bulk of the work in a function
4719 * that doesn't call btrfs_search_slot
4720 */
4721 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4722 struct btrfs_key *cpu_key, u32 *data_size,
4723 u32 total_data, u32 total_size, int nr)
4724 {
4725 struct btrfs_item *item;
4726 int i;
4727 u32 nritems;
4728 unsigned int data_end;
4729 struct btrfs_disk_key disk_key;
4730 struct extent_buffer *leaf;
4731 int slot;
4732 struct btrfs_map_token token;
4733
4734 if (path->slots[0] == 0) {
4735 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4736 fixup_low_keys(root, path, &disk_key, 1);
4737 }
4738 btrfs_unlock_up_safe(path, 1);
4739
4740 btrfs_init_map_token(&token);
4741
4742 leaf = path->nodes[0];
4743 slot = path->slots[0];
4744
4745 nritems = btrfs_header_nritems(leaf);
4746 data_end = leaf_data_end(root, leaf);
4747
4748 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4749 btrfs_print_leaf(root, leaf);
4750 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4751 total_size, btrfs_leaf_free_space(root, leaf));
4752 BUG();
4753 }
4754
4755 if (slot != nritems) {
4756 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4757
4758 if (old_data < data_end) {
4759 btrfs_print_leaf(root, leaf);
4760 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4761 slot, old_data, data_end);
4762 BUG_ON(1);
4763 }
4764 /*
4765 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4766 */
4767 /* first correct the data pointers */
4768 for (i = slot; i < nritems; i++) {
4769 u32 ioff;
4770
4771 item = btrfs_item_nr(i);
4772 ioff = btrfs_token_item_offset(leaf, item, &token);
4773 btrfs_set_token_item_offset(leaf, item,
4774 ioff - total_data, &token);
4775 }
4776 /* shift the items */
4777 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4778 btrfs_item_nr_offset(slot),
4779 (nritems - slot) * sizeof(struct btrfs_item));
4780
4781 /* shift the data */
4782 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4783 data_end - total_data, btrfs_leaf_data(leaf) +
4784 data_end, old_data - data_end);
4785 data_end = old_data;
4786 }
4787
4788 /* setup the item for the new data */
4789 for (i = 0; i < nr; i++) {
4790 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4791 btrfs_set_item_key(leaf, &disk_key, slot + i);
4792 item = btrfs_item_nr(slot + i);
4793 btrfs_set_token_item_offset(leaf, item,
4794 data_end - data_size[i], &token);
4795 data_end -= data_size[i];
4796 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4797 }
4798
4799 btrfs_set_header_nritems(leaf, nritems + nr);
4800 btrfs_mark_buffer_dirty(leaf);
4801
4802 if (btrfs_leaf_free_space(root, leaf) < 0) {
4803 btrfs_print_leaf(root, leaf);
4804 BUG();
4805 }
4806 }
4807
4808 /*
4809 * Given a key and some data, insert items into the tree.
4810 * This does all the path init required, making room in the tree if needed.
4811 */
4812 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4813 struct btrfs_root *root,
4814 struct btrfs_path *path,
4815 struct btrfs_key *cpu_key, u32 *data_size,
4816 int nr)
4817 {
4818 int ret = 0;
4819 int slot;
4820 int i;
4821 u32 total_size = 0;
4822 u32 total_data = 0;
4823
4824 for (i = 0; i < nr; i++)
4825 total_data += data_size[i];
4826
4827 total_size = total_data + (nr * sizeof(struct btrfs_item));
4828 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4829 if (ret == 0)
4830 return -EEXIST;
4831 if (ret < 0)
4832 return ret;
4833
4834 slot = path->slots[0];
4835 BUG_ON(slot < 0);
4836
4837 setup_items_for_insert(root, path, cpu_key, data_size,
4838 total_data, total_size, nr);
4839 return 0;
4840 }
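/*
 * Illustrative sketch of inserting two adjacent items with a single
 * search.  Keys, sizes and payloads are hypothetical; the keys must be in
 * ascending order and not already exist.  On return from
 * btrfs_insert_empty_items() the path points at the first new slot and
 * all the reserved space is already carved out of the leaf.
 */
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_key keys[2],
				    void *data[2], u32 sizes[2])
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		write_extent_buffer(leaf, data[i], ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}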
4841
4842 /*
4843 * Given a key and some data, insert an item into the tree.
4844 * This does all the path init required, making room in the tree if needed.
4845 */
4846 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4847 *root, struct btrfs_key *cpu_key, void *data, u32
4848 data_size)
4849 {
4850 int ret = 0;
4851 struct btrfs_path *path;
4852 struct extent_buffer *leaf;
4853 unsigned long ptr;
4854
4855 path = btrfs_alloc_path();
4856 if (!path)
4857 return -ENOMEM;
4858 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4859 if (!ret) {
4860 leaf = path->nodes[0];
4861 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4862 write_extent_buffer(leaf, data, ptr, data_size);
4863 btrfs_mark_buffer_dirty(leaf);
4864 }
4865 btrfs_free_path(path);
4866 return ret;
4867 }
4868
4869 /*
4870 * delete the pointer from a given node.
4871 *
4872 * the tree should have been previously balanced so the deletion does not
4873 * empty a node.
4874 */
4875 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4876 int level, int slot)
4877 {
4878 struct extent_buffer *parent = path->nodes[level];
4879 u32 nritems;
4880 int ret;
4881
4882 nritems = btrfs_header_nritems(parent);
4883 if (slot != nritems - 1) {
4884 if (level)
4885 tree_mod_log_eb_move(root->fs_info, parent, slot,
4886 slot + 1, nritems - slot - 1);
4887 memmove_extent_buffer(parent,
4888 btrfs_node_key_ptr_offset(slot),
4889 btrfs_node_key_ptr_offset(slot + 1),
4890 sizeof(struct btrfs_key_ptr) *
4891 (nritems - slot - 1));
4892 } else if (level) {
4893 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4894 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4895 BUG_ON(ret < 0);
4896 }
4897
4898 nritems--;
4899 btrfs_set_header_nritems(parent, nritems);
4900 if (nritems == 0 && parent == root->node) {
4901 BUG_ON(btrfs_header_level(root->node) != 1);
4902 /* just turn the root into a leaf and break */
4903 btrfs_set_header_level(root->node, 0);
4904 } else if (slot == 0) {
4905 struct btrfs_disk_key disk_key;
4906
4907 btrfs_node_key(parent, &disk_key, 0);
4908 fixup_low_keys(root, path, &disk_key, level + 1);
4909 }
4910 btrfs_mark_buffer_dirty(parent);
4911 }
4912
4913 /*
4914 * a helper function to delete the leaf pointed to by path->slots[1] and
4915 * path->nodes[1].
4916 *
4917 * This deletes the pointer in path->nodes[1] and frees the leaf
4918 * block extent. zero is returned if it all worked out, < 0 otherwise.
4919 *
4920 * The path must have already been setup for deleting the leaf, including
4921 * all the proper balancing. path->nodes[1] must be locked.
4922 */
4923 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4924 struct btrfs_root *root,
4925 struct btrfs_path *path,
4926 struct extent_buffer *leaf)
4927 {
4928 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4929 del_ptr(root, path, 1, path->slots[1]);
4930
4931 /*
4932 * btrfs_free_extent is expensive, we want to make sure we
4933 * aren't holding any locks when we call it
4934 */
4935 btrfs_unlock_up_safe(path, 0);
4936
4937 root_sub_used(root, leaf->len);
4938
4939 extent_buffer_get(leaf);
4940 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4941 free_extent_buffer_stale(leaf);
4942 }
4943 /*
4944 * delete the item at the leaf level in path. If that empties
4945 * the leaf, remove it from the tree
4946 */
4947 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4948 struct btrfs_path *path, int slot, int nr)
4949 {
4950 struct extent_buffer *leaf;
4951 struct btrfs_item *item;
4952 int last_off;
4953 int dsize = 0;
4954 int ret = 0;
4955 int wret;
4956 int i;
4957 u32 nritems;
4958 struct btrfs_map_token token;
4959
4960 btrfs_init_map_token(&token);
4961
4962 leaf = path->nodes[0];
4963 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4964
4965 for (i = 0; i < nr; i++)
4966 dsize += btrfs_item_size_nr(leaf, slot + i);
4967
4968 nritems = btrfs_header_nritems(leaf);
4969
4970 if (slot + nr != nritems) {
4971 int data_end = leaf_data_end(root, leaf);
4972
4973 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4974 data_end + dsize,
4975 btrfs_leaf_data(leaf) + data_end,
4976 last_off - data_end);
4977
4978 for (i = slot + nr; i < nritems; i++) {
4979 u32 ioff;
4980
4981 item = btrfs_item_nr(i);
4982 ioff = btrfs_token_item_offset(leaf, item, &token);
4983 btrfs_set_token_item_offset(leaf, item,
4984 ioff + dsize, &token);
4985 }
4986
4987 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4988 btrfs_item_nr_offset(slot + nr),
4989 sizeof(struct btrfs_item) *
4990 (nritems - slot - nr));
4991 }
4992 btrfs_set_header_nritems(leaf, nritems - nr);
4993 nritems -= nr;
4994
4995 /* delete the leaf if we've emptied it */
4996 if (nritems == 0) {
4997 if (leaf == root->node) {
4998 btrfs_set_header_level(leaf, 0);
4999 } else {
5000 btrfs_set_path_blocking(path);
5001 clean_tree_block(trans, root, leaf);
5002 btrfs_del_leaf(trans, root, path, leaf);
5003 }
5004 } else {
5005 int used = leaf_space_used(leaf, 0, nritems);
5006 if (slot == 0) {
5007 struct btrfs_disk_key disk_key;
5008
5009 btrfs_item_key(leaf, &disk_key, 0);
5010 fixup_low_keys(root, path, &disk_key, 1);
5011 }
5012
5013 /* delete the leaf if it is mostly empty */
5014 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5015 /* push_leaf_left fixes the path.
5016 * make sure the path still points to our leaf
5017 * for possible call to del_ptr below
5018 */
5019 slot = path->slots[1];
5020 extent_buffer_get(leaf);
5021
5022 btrfs_set_path_blocking(path);
5023 wret = push_leaf_left(trans, root, path, 1, 1,
5024 1, (u32)-1);
5025 if (wret < 0 && wret != -ENOSPC)
5026 ret = wret;
5027
5028 if (path->nodes[0] == leaf &&
5029 btrfs_header_nritems(leaf)) {
5030 wret = push_leaf_right(trans, root, path, 1,
5031 1, 1, 0);
5032 if (wret < 0 && wret != -ENOSPC)
5033 ret = wret;
5034 }
5035
5036 if (btrfs_header_nritems(leaf) == 0) {
5037 path->slots[1] = slot;
5038 btrfs_del_leaf(trans, root, path, leaf);
5039 free_extent_buffer(leaf);
5040 ret = 0;
5041 } else {
5042 /* if we're still in the path, make sure
5043 * we're dirty. Otherwise, one of the
5044 * push_leaf functions must have already
5045 * dirtied this buffer
5046 */
5047 if (path->nodes[0] == leaf)
5048 btrfs_mark_buffer_dirty(leaf);
5049 free_extent_buffer(leaf);
5050 }
5051 } else {
5052 btrfs_mark_buffer_dirty(leaf);
5053 }
5054 }
5055 return ret;
5056 }
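/*
 * Illustrative sketch of deleting a single item by key.  The key and the
 * error handling are hypothetical.  The search uses ins_len = -1 and
 * cow = 1, which tells btrfs_search_slot() the caller intends to delete,
 * so the leaf is cowed and pre-balanced; btrfs_del_items() then removes
 * the item and cleans up or merges the leaf if it becomes too empty.
 */
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
out:
	btrfs_free_path(path);
	return ret;
}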
5057
5058 /*
5059 * search the tree again to find a leaf with lesser keys
5060 * returns 0 if it found something or 1 if there are no lesser leaves.
5061 * returns < 0 on io errors.
5062 *
5063 * This may release the path, and so you may lose any locks held at the
5064 * time you call it.
5065 */
5066 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5067 {
5068 struct btrfs_key key;
5069 struct btrfs_disk_key found_key;
5070 int ret;
5071
5072 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5073
5074 if (key.offset > 0) {
5075 key.offset--;
5076 } else if (key.type > 0) {
5077 key.type--;
5078 key.offset = (u64)-1;
5079 } else if (key.objectid > 0) {
5080 key.objectid--;
5081 key.type = (u8)-1;
5082 key.offset = (u64)-1;
5083 } else {
5084 return 1;
5085 }
5086
5087 btrfs_release_path(path);
5088 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5089 if (ret < 0)
5090 return ret;
5091 btrfs_item_key(path->nodes[0], &found_key, 0);
5092 ret = comp_keys(&found_key, &key);
5093 /*
5094 * We might have had an item with the previous key in the tree right
5095 * before we released our path. And after we released our path, that
5096 * item might have been pushed to the first slot (0) of the leaf we
5097 * were holding due to a tree balance. Alternatively, an item with the
5098 * previous key can exist as the only element of a leaf (big fat item).
5099 * Therefore account for these 2 cases, so that our callers (like
5100 * btrfs_previous_item) don't miss an existing item with a key matching
5101 * the previous key we computed above.
5102 */
5103 if (ret <= 0)
5104 return 0;
5105 return 1;
5106 }
5107
5108 /*
5109 * A helper function to walk down the tree starting at min_key, and looking
5110 * for nodes or leaves that have a minimum transaction id.
5111 * This is used by the btree defrag code, and tree logging
5112 *
5113 * This does not cow, but it does stuff the starting key it finds back
5114 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5115 * key and get a writable path.
5116 *
5117 * This does lock as it descends; path->keep_locks is saved on entry,
5118 * forced to 1 for the duration of the search and restored on return.
5119 *
5120 * This honors path->lowest_level to prevent descent past a given level
5121 * of the tree.
5122 *
5123 * min_trans indicates the oldest transaction that you are interested
5124 * in walking through. Any nodes or leaves older than min_trans are
5125 * skipped over (without reading them).
5126 *
5127 * returns zero if something useful was found, < 0 on error and 1 if there
5128 * was nothing in the tree that matched the search criteria.
5129 */
5130 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5131 struct btrfs_path *path,
5132 u64 min_trans)
5133 {
5134 struct extent_buffer *cur;
5135 struct btrfs_key found_key;
5136 int slot;
5137 int sret;
5138 u32 nritems;
5139 int level;
5140 int ret = 1;
5141 int keep_locks = path->keep_locks;
5142
5143 path->keep_locks = 1;
5144 again:
5145 cur = btrfs_read_lock_root_node(root);
5146 level = btrfs_header_level(cur);
5147 WARN_ON(path->nodes[level]);
5148 path->nodes[level] = cur;
5149 path->locks[level] = BTRFS_READ_LOCK;
5150
5151 if (btrfs_header_generation(cur) < min_trans) {
5152 ret = 1;
5153 goto out;
5154 }
5155 while (1) {
5156 nritems = btrfs_header_nritems(cur);
5157 level = btrfs_header_level(cur);
5158 sret = bin_search(cur, min_key, level, &slot);
5159
5160 /* at the lowest level, we're done, setup the path and exit */
5161 if (level == path->lowest_level) {
5162 if (slot >= nritems)
5163 goto find_next_key;
5164 ret = 0;
5165 path->slots[level] = slot;
5166 btrfs_item_key_to_cpu(cur, &found_key, slot);
5167 goto out;
5168 }
5169 if (sret && slot > 0)
5170 slot--;
5171 /*
5172 * check this node pointer against the min_trans parameter.
5173 * If it is too old, skip to the next one.
5174 */
5175 while (slot < nritems) {
5176 u64 gen;
5177
5178 gen = btrfs_node_ptr_generation(cur, slot);
5179 if (gen < min_trans) {
5180 slot++;
5181 continue;
5182 }
5183 break;
5184 }
5185 find_next_key:
5186 /*
5187 * we didn't find a candidate key in this node, walk forward
5188 * and find another one
5189 */
5190 if (slot >= nritems) {
5191 path->slots[level] = slot;
5192 btrfs_set_path_blocking(path);
5193 sret = btrfs_find_next_key(root, path, min_key, level,
5194 min_trans);
5195 if (sret == 0) {
5196 btrfs_release_path(path);
5197 goto again;
5198 } else {
5199 goto out;
5200 }
5201 }
5202 /* save our key for returning back */
5203 btrfs_node_key_to_cpu(cur, &found_key, slot);
5204 path->slots[level] = slot;
5205 if (level == path->lowest_level) {
5206 ret = 0;
5207 goto out;
5208 }
5209 btrfs_set_path_blocking(path);
5210 cur = read_node_slot(root, cur, slot);
5211 BUG_ON(!cur); /* -ENOMEM */
5212
5213 btrfs_tree_read_lock(cur);
5214
5215 path->locks[level - 1] = BTRFS_READ_LOCK;
5216 path->nodes[level - 1] = cur;
5217 unlock_up(path, level, 1, 0, NULL);
5218 btrfs_clear_path_blocking(path, NULL, 0);
5219 }
5220 out:
5221 path->keep_locks = keep_locks;
5222 if (ret == 0) {
5223 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5224 btrfs_set_path_blocking(path);
5225 memcpy(min_key, &found_key, sizeof(found_key));
5226 }
5227 return ret;
5228 }
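/*
 * Illustrative sketch of a scan over everything newer than 'min_trans',
 * in the style of the defrag and tree-log callers mentioned above.  The
 * starting key and the per-item processing are placeholders.
 * btrfs_search_forward() writes the key it found back into min_key, so
 * the caller only has to bump the key past the last result before
 * searching again.
 */
static int example_scan_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* nothing newer left */
			break;
		}

		/* process path->nodes[0] / path->slots[0] for min_key here */

		btrfs_release_path(path);

		/* advance past the key we just processed */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret;
}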
5229
5230 static void tree_move_down(struct btrfs_root *root,
5231 struct btrfs_path *path,
5232 int *level, int root_level)
5233 {
5234 BUG_ON(*level == 0);
5235 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5236 path->slots[*level]);
5237 path->slots[*level - 1] = 0;
5238 (*level)--;
5239 }
5240
5241 static int tree_move_next_or_upnext(struct btrfs_root *root,
5242 struct btrfs_path *path,
5243 int *level, int root_level)
5244 {
5245 int ret = 0;
5246 int nritems;
5247 nritems = btrfs_header_nritems(path->nodes[*level]);
5248
5249 path->slots[*level]++;
5250
5251 while (path->slots[*level] >= nritems) {
5252 if (*level == root_level)
5253 return -1;
5254
5255 /* move upnext */
5256 path->slots[*level] = 0;
5257 free_extent_buffer(path->nodes[*level]);
5258 path->nodes[*level] = NULL;
5259 (*level)++;
5260 path->slots[*level]++;
5261
5262 nritems = btrfs_header_nritems(path->nodes[*level]);
5263 ret = 1;
5264 }
5265 return ret;
5266 }
5267
5268 /*
5269 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5270 * or down.
5271 */
5272 static int tree_advance(struct btrfs_root *root,
5273 struct btrfs_path *path,
5274 int *level, int root_level,
5275 int allow_down,
5276 struct btrfs_key *key)
5277 {
5278 int ret;
5279
5280 if (*level == 0 || !allow_down) {
5281 ret = tree_move_next_or_upnext(root, path, level, root_level);
5282 } else {
5283 tree_move_down(root, path, level, root_level);
5284 ret = 0;
5285 }
5286 if (ret >= 0) {
5287 if (*level == 0)
5288 btrfs_item_key_to_cpu(path->nodes[*level], key,
5289 path->slots[*level]);
5290 else
5291 btrfs_node_key_to_cpu(path->nodes[*level], key,
5292 path->slots[*level]);
5293 }
5294 return ret;
5295 }
5296
5297 static int tree_compare_item(struct btrfs_root *left_root,
5298 struct btrfs_path *left_path,
5299 struct btrfs_path *right_path,
5300 char *tmp_buf)
5301 {
5302 int cmp;
5303 int len1, len2;
5304 unsigned long off1, off2;
5305
5306 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5307 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5308 if (len1 != len2)
5309 return 1;
5310
5311 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5312 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5313 right_path->slots[0]);
5314
5315 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5316
5317 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5318 if (cmp)
5319 return 1;
5320 return 0;
5321 }
5322
5323 #define ADVANCE 1
5324 #define ADVANCE_ONLY_NEXT -1
5325
5326 /*
5327 * This function compares two trees and calls the provided callback for
5328 * every changed/new/deleted item it finds.
5329 * If shared tree blocks are encountered, whole subtrees are skipped, making
5330 * the compare pretty fast on snapshotted subvolumes.
5331 *
5332 * This currently works on commit roots only. As commit roots are read only,
5333 * we don't do any locking. The commit roots are protected with transactions.
5334 * Transactions are ended and rejoined when a commit is tried in between.
5335 *
5336 * This function checks for modifications done to the trees while comparing.
5337 * If it detects a change, it aborts immediately.
5338 */
5339 int btrfs_compare_trees(struct btrfs_root *left_root,
5340 struct btrfs_root *right_root,
5341 btrfs_changed_cb_t changed_cb, void *ctx)
5342 {
5343 int ret;
5344 int cmp;
5345 struct btrfs_path *left_path = NULL;
5346 struct btrfs_path *right_path = NULL;
5347 struct btrfs_key left_key;
5348 struct btrfs_key right_key;
5349 char *tmp_buf = NULL;
5350 int left_root_level;
5351 int right_root_level;
5352 int left_level;
5353 int right_level;
5354 int left_end_reached;
5355 int right_end_reached;
5356 int advance_left;
5357 int advance_right;
5358 u64 left_blockptr;
5359 u64 right_blockptr;
5360 u64 left_gen;
5361 u64 right_gen;
5362
5363 left_path = btrfs_alloc_path();
5364 if (!left_path) {
5365 ret = -ENOMEM;
5366 goto out;
5367 }
5368 right_path = btrfs_alloc_path();
5369 if (!right_path) {
5370 ret = -ENOMEM;
5371 goto out;
5372 }
5373
5374 tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
5375 if (!tmp_buf) {
5376 ret = -ENOMEM;
5377 goto out;
5378 }
5379
5380 left_path->search_commit_root = 1;
5381 left_path->skip_locking = 1;
5382 right_path->search_commit_root = 1;
5383 right_path->skip_locking = 1;
5384
5385 /*
5386 * Strategy: Go to the first items of both trees. Then do
5387 *
5388 * If both trees are at level 0
5389 * Compare keys of current items
5390 * If left < right treat left item as new, advance left tree
5391 * and repeat
5392 * If left > right treat right item as deleted, advance right tree
5393 * and repeat
5394 * If left == right do deep compare of items, treat as changed if
5395 * needed, advance both trees and repeat
5396 * If both trees are at the same level but not at level 0
5397 * Compare keys of current nodes/leaves
5398 * If left < right advance left tree and repeat
5399 * If left > right advance right tree and repeat
5400 * If left == right compare blockptrs of the next nodes/leaves
5401 * If they match advance both trees but stay at the same level
5402 * and repeat
5403 * If they don't match advance both trees while allowing to go
5404 * deeper and repeat
5405 * If tree levels are different
5406 * Advance the tree that needs it and repeat
5407 *
5408 * Advancing a tree means:
5409 * If we are at level 0, try to go to the next slot. If that's not
5410 * possible, go one level up and repeat. Stop when we find a level
5411 * where we can go to the next slot. We may at this point be on a
5412 * node or a leaf.
5413 *
5414 * If we are not at level 0 and not on shared tree blocks, go one
5415 * level deeper.
5416 *
5417 * If we are not at level 0 and on shared tree blocks, go one slot to
5418 * the right if possible or go up and right.
5419 */
5420
5421 down_read(&left_root->fs_info->commit_root_sem);
5422 left_level = btrfs_header_level(left_root->commit_root);
5423 left_root_level = left_level;
5424 left_path->nodes[left_level] = left_root->commit_root;
5425 extent_buffer_get(left_path->nodes[left_level]);
5426
5427 right_level = btrfs_header_level(right_root->commit_root);
5428 right_root_level = right_level;
5429 right_path->nodes[right_level] = right_root->commit_root;
5430 extent_buffer_get(right_path->nodes[right_level]);
5431 up_read(&left_root->fs_info->commit_root_sem);
5432
5433 if (left_level == 0)
5434 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5435 &left_key, left_path->slots[left_level]);
5436 else
5437 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5438 &left_key, left_path->slots[left_level]);
5439 if (right_level == 0)
5440 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5441 &right_key, right_path->slots[right_level]);
5442 else
5443 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5444 &right_key, right_path->slots[right_level]);
5445
5446 left_end_reached = right_end_reached = 0;
5447 advance_left = advance_right = 0;
5448
5449 while (1) {
5450 if (advance_left && !left_end_reached) {
5451 ret = tree_advance(left_root, left_path, &left_level,
5452 left_root_level,
5453 advance_left != ADVANCE_ONLY_NEXT,
5454 &left_key);
5455 if (ret < 0)
5456 left_end_reached = ADVANCE;
5457 advance_left = 0;
5458 }
5459 if (advance_right && !right_end_reached) {
5460 ret = tree_advance(right_root, right_path, &right_level,
5461 right_root_level,
5462 advance_right != ADVANCE_ONLY_NEXT,
5463 &right_key);
5464 if (ret < 0)
5465 right_end_reached = ADVANCE;
5466 advance_right = 0;
5467 }
5468
5469 if (left_end_reached && right_end_reached) {
5470 ret = 0;
5471 goto out;
5472 } else if (left_end_reached) {
5473 if (right_level == 0) {
5474 ret = changed_cb(left_root, right_root,
5475 left_path, right_path,
5476 &right_key,
5477 BTRFS_COMPARE_TREE_DELETED,
5478 ctx);
5479 if (ret < 0)
5480 goto out;
5481 }
5482 advance_right = ADVANCE;
5483 continue;
5484 } else if (right_end_reached) {
5485 if (left_level == 0) {
5486 ret = changed_cb(left_root, right_root,
5487 left_path, right_path,
5488 &left_key,
5489 BTRFS_COMPARE_TREE_NEW,
5490 ctx);
5491 if (ret < 0)
5492 goto out;
5493 }
5494 advance_left = ADVANCE;
5495 continue;
5496 }
5497
5498 if (left_level == 0 && right_level == 0) {
5499 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5500 if (cmp < 0) {
5501 ret = changed_cb(left_root, right_root,
5502 left_path, right_path,
5503 &left_key,
5504 BTRFS_COMPARE_TREE_NEW,
5505 ctx);
5506 if (ret < 0)
5507 goto out;
5508 advance_left = ADVANCE;
5509 } else if (cmp > 0) {
5510 ret = changed_cb(left_root, right_root,
5511 left_path, right_path,
5512 &right_key,
5513 BTRFS_COMPARE_TREE_DELETED,
5514 ctx);
5515 if (ret < 0)
5516 goto out;
5517 advance_right = ADVANCE;
5518 } else {
5519 enum btrfs_compare_tree_result cmp;
5520
5521 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5522 ret = tree_compare_item(left_root, left_path,
5523 right_path, tmp_buf);
5524 if (ret)
5525 cmp = BTRFS_COMPARE_TREE_CHANGED;
5526 else
5527 cmp = BTRFS_COMPARE_TREE_SAME;
5528 ret = changed_cb(left_root, right_root,
5529 left_path, right_path,
5530 &left_key, cmp, ctx);
5531 if (ret < 0)
5532 goto out;
5533 advance_left = ADVANCE;
5534 advance_right = ADVANCE;
5535 }
5536 } else if (left_level == right_level) {
5537 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5538 if (cmp < 0) {
5539 advance_left = ADVANCE;
5540 } else if (cmp > 0) {
5541 advance_right = ADVANCE;
5542 } else {
5543 left_blockptr = btrfs_node_blockptr(
5544 left_path->nodes[left_level],
5545 left_path->slots[left_level]);
5546 right_blockptr = btrfs_node_blockptr(
5547 right_path->nodes[right_level],
5548 right_path->slots[right_level]);
5549 left_gen = btrfs_node_ptr_generation(
5550 left_path->nodes[left_level],
5551 left_path->slots[left_level]);
5552 right_gen = btrfs_node_ptr_generation(
5553 right_path->nodes[right_level],
5554 right_path->slots[right_level]);
5555 if (left_blockptr == right_blockptr &&
5556 left_gen == right_gen) {
5557 /*
5558 * As we're on a shared block, don't
5559 * allow going any deeper.
5560 */
5561 advance_left = ADVANCE_ONLY_NEXT;
5562 advance_right = ADVANCE_ONLY_NEXT;
5563 } else {
5564 advance_left = ADVANCE;
5565 advance_right = ADVANCE;
5566 }
5567 }
5568 } else if (left_level < right_level) {
5569 advance_right = ADVANCE;
5570 } else {
5571 advance_left = ADVANCE;
5572 }
5573 }
5574
5575 out:
5576 btrfs_free_path(left_path);
5577 btrfs_free_path(right_path);
5578 kfree(tmp_buf);
5579 return ret;
5580 }
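/*
 * Illustrative sketch of the callback shape expected by
 * btrfs_compare_trees(), matching how changed_cb() is invoked above.  The
 * function name and the empty case bodies are placeholders; the real user
 * of this interface (the send code) turns each result into a create,
 * delete or update operation.  A caller would pass it as
 * btrfs_compare_trees(new_root, old_root, example_compare_cb, ctx).
 */
static int example_compare_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		/* key only exists in the left (newer) tree */
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		/* key only exists in the right (older) tree */
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		/* same key, different item contents */
		break;
	case BTRFS_COMPARE_TREE_SAME:
		/* identical items, usually nothing to do */
		break;
	}
	return 0;
}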
5581
5582 /*
5583 * this is similar to btrfs_next_leaf, but does not try to preserve
5584 * and fixup the path. It looks for and returns the next key in the
5585 * tree based on the current path and the min_trans parameters.
5586 *
5587 * 0 is returned if another key is found, < 0 if there are any errors
5588 * and 1 is returned if there are no higher keys in the tree
5589 *
5590 * path->keep_locks should be set to 1 on the search made before
5591 * calling this function.
5592 */
5593 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5594 struct btrfs_key *key, int level, u64 min_trans)
5595 {
5596 int slot;
5597 struct extent_buffer *c;
5598
5599 WARN_ON(!path->keep_locks);
5600 while (level < BTRFS_MAX_LEVEL) {
5601 if (!path->nodes[level])
5602 return 1;
5603
5604 slot = path->slots[level] + 1;
5605 c = path->nodes[level];
5606 next:
5607 if (slot >= btrfs_header_nritems(c)) {
5608 int ret;
5609 int orig_lowest;
5610 struct btrfs_key cur_key;
5611 if (level + 1 >= BTRFS_MAX_LEVEL ||
5612 !path->nodes[level + 1])
5613 return 1;
5614
5615 if (path->locks[level + 1]) {
5616 level++;
5617 continue;
5618 }
5619
5620 slot = btrfs_header_nritems(c) - 1;
5621 if (level == 0)
5622 btrfs_item_key_to_cpu(c, &cur_key, slot);
5623 else
5624 btrfs_node_key_to_cpu(c, &cur_key, slot);
5625
5626 orig_lowest = path->lowest_level;
5627 btrfs_release_path(path);
5628 path->lowest_level = level;
5629 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5630 0, 0);
5631 path->lowest_level = orig_lowest;
5632 if (ret < 0)
5633 return ret;
5634
5635 c = path->nodes[level];
5636 slot = path->slots[level];
5637 if (ret == 0)
5638 slot++;
5639 goto next;
5640 }
5641
5642 if (level == 0)
5643 btrfs_item_key_to_cpu(c, key, slot);
5644 else {
5645 u64 gen = btrfs_node_ptr_generation(c, slot);
5646
5647 if (gen < min_trans) {
5648 slot++;
5649 goto next;
5650 }
5651 btrfs_node_key_to_cpu(c, key, slot);
5652 }
5653 return 0;
5654 }
5655 return 1;
5656 }
5657
5658 /*
5659 * search the tree again to find a leaf with greater keys
5660 * returns 0 if it found something or 1 if there are no greater leaves.
5661 * returns < 0 on io errors.
5662 */
5663 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5664 {
5665 return btrfs_next_old_leaf(root, path, 0);
5666 }
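/*
 * Illustrative sketch of walking every item of a tree in key order, using
 * btrfs_next_leaf() to hop to the next leaf whenever the current one is
 * exhausted.  The starting key of (0, 0, 0) and the per-item processing
 * are placeholders; the walk is read only (no transaction, cow = 0).
 */
static int example_walk_all_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1 == no more leaves, < 0 == error */
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* process the item for 'key' here */

		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}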
5667
5668 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5669 u64 time_seq)
5670 {
5671 int slot;
5672 int level;
5673 struct extent_buffer *c;
5674 struct extent_buffer *next;
5675 struct btrfs_key key;
5676 u32 nritems;
5677 int ret;
5678 int old_spinning = path->leave_spinning;
5679 int next_rw_lock = 0;
5680
5681 nritems = btrfs_header_nritems(path->nodes[0]);
5682 if (nritems == 0)
5683 return 1;
5684
5685 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5686 again:
5687 level = 1;
5688 next = NULL;
5689 next_rw_lock = 0;
5690 btrfs_release_path(path);
5691
5692 path->keep_locks = 1;
5693 path->leave_spinning = 1;
5694
5695 if (time_seq)
5696 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5697 else
5698 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5699 path->keep_locks = 0;
5700
5701 if (ret < 0)
5702 return ret;
5703
5704 nritems = btrfs_header_nritems(path->nodes[0]);
5705 /*
5706 * by releasing the path above we dropped all our locks. A balance
5707 * could have added more items next to the key that used to be
5708 * at the very end of the block. So, check again here and
5709 * advance the path if there are now more items available.
5710 */
5711 if (nritems > 0 && path->slots[0] < nritems - 1) {
5712 if (ret == 0)
5713 path->slots[0]++;
5714 ret = 0;
5715 goto done;
5716 }
5717 /*
5718 * So the above check misses one case:
5719 * - after releasing the path above, someone has removed the item that
5720 * used to be at the very end of the block, and balance between leaves
5721 * gets another one with bigger key.offset to replace it.
5722 *
5723 * This one should be returned as well, or we can get leaf corruption
5724 * later (esp. in __btrfs_drop_extents()).
5725 *
5726 * And a bit more explanation about this check,
5727 * with ret > 0, the key isn't found, the path points to the slot
5728 * where it should be inserted, so the path->slots[0] item must be the
5729 * bigger one.
5730 */
5731 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5732 ret = 0;
5733 goto done;
5734 }
5735
5736 while (level < BTRFS_MAX_LEVEL) {
5737 if (!path->nodes[level]) {
5738 ret = 1;
5739 goto done;
5740 }
5741
5742 slot = path->slots[level] + 1;
5743 c = path->nodes[level];
5744 if (slot >= btrfs_header_nritems(c)) {
5745 level++;
5746 if (level == BTRFS_MAX_LEVEL) {
5747 ret = 1;
5748 goto done;
5749 }
5750 continue;
5751 }
5752
5753 if (next) {
5754 btrfs_tree_unlock_rw(next, next_rw_lock);
5755 free_extent_buffer(next);
5756 }
5757
5758 next = c;
5759 next_rw_lock = path->locks[level];
5760 ret = read_block_for_search(NULL, root, path, &next, level,
5761 slot, &key, 0);
5762 if (ret == -EAGAIN)
5763 goto again;
5764
5765 if (ret < 0) {
5766 btrfs_release_path(path);
5767 goto done;
5768 }
5769
5770 if (!path->skip_locking) {
5771 ret = btrfs_try_tree_read_lock(next);
5772 if (!ret && time_seq) {
5773 /*
5774 * If we don't get the lock, we may be racing
5775 * with push_leaf_left, which holds that lock
5776 * while waiting for the leaf we currently have
5777 * locked. To resolve this, we give up our lock
5778 * and cycle.
5779 */
5780 free_extent_buffer(next);
5781 btrfs_release_path(path);
5782 cond_resched();
5783 goto again;
5784 }
5785 if (!ret) {
5786 btrfs_set_path_blocking(path);
5787 btrfs_tree_read_lock(next);
5788 btrfs_clear_path_blocking(path, next,
5789 BTRFS_READ_LOCK);
5790 }
5791 next_rw_lock = BTRFS_READ_LOCK;
5792 }
5793 break;
5794 }
5795 path->slots[level] = slot;
5796 while (1) {
5797 level--;
5798 c = path->nodes[level];
5799 if (path->locks[level])
5800 btrfs_tree_unlock_rw(c, path->locks[level]);
5801
5802 free_extent_buffer(c);
5803 path->nodes[level] = next;
5804 path->slots[level] = 0;
5805 if (!path->skip_locking)
5806 path->locks[level] = next_rw_lock;
5807 if (!level)
5808 break;
5809
5810 ret = read_block_for_search(NULL, root, path, &next, level,
5811 0, &key, 0);
5812 if (ret == -EAGAIN)
5813 goto again;
5814
5815 if (ret < 0) {
5816 btrfs_release_path(path);
5817 goto done;
5818 }
5819
5820 if (!path->skip_locking) {
5821 ret = btrfs_try_tree_read_lock(next);
5822 if (!ret) {
5823 btrfs_set_path_blocking(path);
5824 btrfs_tree_read_lock(next);
5825 btrfs_clear_path_blocking(path, next,
5826 BTRFS_READ_LOCK);
5827 }
5828 next_rw_lock = BTRFS_READ_LOCK;
5829 }
5830 }
5831 ret = 0;
5832 done:
5833 unlock_up(path, 0, 1, 0, NULL);
5834 path->leave_spinning = old_spinning;
5835 if (!old_spinning)
5836 btrfs_set_path_blocking(path);
5837
5838 return ret;
5839 }
5840
5841 /*
5842 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5843 * searching until it gets past min_objectid or finds an item of 'type'
5844 *
5845 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5846 */
5847 int btrfs_previous_item(struct btrfs_root *root,
5848 struct btrfs_path *path, u64 min_objectid,
5849 int type)
5850 {
5851 struct btrfs_key found_key;
5852 struct extent_buffer *leaf;
5853 u32 nritems;
5854 int ret;
5855
5856 while (1) {
5857 if (path->slots[0] == 0) {
5858 btrfs_set_path_blocking(path);
5859 ret = btrfs_prev_leaf(root, path);
5860 if (ret != 0)
5861 return ret;
5862 } else {
5863 path->slots[0]--;
5864 }
5865 leaf = path->nodes[0];
5866 nritems = btrfs_header_nritems(leaf);
5867 if (nritems == 0)
5868 return 1;
5869 if (path->slots[0] == nritems)
5870 path->slots[0]--;
5871
5872 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5873 if (found_key.objectid < min_objectid)
5874 break;
5875 if (found_key.type == type)
5876 return 0;
5877 if (found_key.objectid == min_objectid &&
5878 found_key.type < type)
5879 break;
5880 }
5881 return 1;
5882 }
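/*
 * Illustrative sketch of stepping backwards to the closest preceding item
 * of a given type, the way several callers of this helper do.  The path
 * is assumed to already be positioned by an earlier search; the objectid
 * and type are placeholders.
 */
static int example_find_prev_of_type(struct btrfs_root *root,
				     struct btrfs_path *path,
				     u64 min_objectid, int type,
				     struct btrfs_key *found_key)
{
	int ret;

	ret = btrfs_previous_item(root, path, min_objectid, type);
	if (ret)	/* 1 == nothing found, < 0 == error */
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	return 0;
}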
5883
5884 /*
5885 * search the extent tree to find a previous Metadata/Data extent item with
5886 * a minimum objectid.
5887 *
5888 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5889 */
5890 int btrfs_previous_extent_item(struct btrfs_root *root,
5891 struct btrfs_path *path, u64 min_objectid)
5892 {
5893 struct btrfs_key found_key;
5894 struct extent_buffer *leaf;
5895 u32 nritems;
5896 int ret;
5897
5898 while (1) {
5899 if (path->slots[0] == 0) {
5900 btrfs_set_path_blocking(path);
5901 ret = btrfs_prev_leaf(root, path);
5902 if (ret != 0)
5903 return ret;
5904 } else {
5905 path->slots[0]--;
5906 }
5907 leaf = path->nodes[0];
5908 nritems = btrfs_header_nritems(leaf);
5909 if (nritems == 0)
5910 return 1;
5911 if (path->slots[0] == nritems)
5912 path->slots[0]--;
5913
5914 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5915 if (found_key.objectid < min_objectid)
5916 break;
5917 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5918 found_key.type == BTRFS_METADATA_ITEM_KEY)
5919 return 0;
5920 if (found_key.objectid == min_objectid &&
5921 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5922 break;
5923 }
5924 return 1;
5925 }