Btrfs: log ram bytes properly
fs/btrfs/relocation.c
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"

/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};
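
/*
 * Because backref_node, mapping_node and tree_block all begin with the same
 * rb_node + bytenr layout as struct tree_entry, the generic tree_insert()
 * and tree_search() helpers below can index any of them by bytenr, and
 * callers recover the containing structure with rb_entry().
 */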

/*
 * represents a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, can be not uptodate */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer obtained by COWing the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in a non-reference-counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node.
	 */
	unsigned int detached:1;
};

/*
 * represents a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER 0
#define UPPER 1

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in the current transaction */
	struct list_head changed;
	/* list of detached backref nodes. */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};

/*
 * maps the address of a tree root to the tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * represents a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks that have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
	unsigned int commit_transaction:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1

static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;
	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}

static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		BUG_ON(!list_empty(&cache->pending[i]));
	BUG_ON(!list_empty(&cache->changed));
	BUG_ON(!list_empty(&cache->detached));
	BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
	BUG_ON(cache->nr_nodes);
	BUG_ON(cache->nr_edges);
}

static struct backref_node *alloc_backref_node(struct backref_cache *cache)
{
	struct backref_node *node;

	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (node) {
		INIT_LIST_HEAD(&node->list);
		INIT_LIST_HEAD(&node->upper);
		INIT_LIST_HEAD(&node->lower);
		RB_CLEAR_NODE(&node->rb_node);
		cache->nr_nodes++;
	}
	return node;
}

static void free_backref_node(struct backref_cache *cache,
			      struct backref_node *node)
{
	if (node) {
		cache->nr_nodes--;
		kfree(node);
	}
}

static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
{
	struct backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

static void free_backref_edge(struct backref_cache *cache,
			      struct backref_edge *edge)
{
	if (edge) {
		cache->nr_edges--;
		kfree(edge);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *n = root->rb_node;
	struct tree_entry *entry;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return n;
	}
	return NULL;
}
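
/*
 * Usage sketch for the two helpers above: tree_insert() returns NULL on
 * success and the colliding rb_node when an entry with the same bytenr
 * already exists, which the callers below treat as -EEXIST (see
 * backref_tree_panic()). tree_search() returns the rb_node indexed at a
 * bytenr, and callers convert it back to the containing structure with
 * rb_entry(rb_node, struct backref_node, rb_node) or similar.
 */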

void backref_tree_panic(struct rb_node *rb_node, int errno,
			u64 bytenr)
{

	struct btrfs_fs_info *fs_info = NULL;
	struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
					      rb_node);
	if (bnode->root)
		fs_info = bnode->root->fs_info;
	btrfs_panic(fs_info, errno, "Inconsistency in backref cache "
		    "found at offset %llu\n", (unsigned long long)bytenr);
}

/*
 * walk up backref nodes until we reach the node that represents the tree root
 */
static struct backref_node *walk_up_backref(struct backref_node *node,
					    struct backref_edge *edges[],
					    int *index)
{
	struct backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	BUG_ON(node->detached);
	*index = idx;
	return node;
}

/*
 * walk down backref nodes to find the start of the next reference path
 */
static struct backref_node *walk_down_backref(struct backref_edge *edges[],
					      int *index)
{
	struct backref_edge *edge;
	struct backref_node *lower;
	int idx = *index;

	while (idx > 0) {
		edge = edges[idx - 1];
		lower = edge->node[LOWER];
		if (list_is_last(&edge->list[LOWER], &lower->upper)) {
			idx--;
			continue;
		}
		edge = list_entry(edge->list[LOWER].next,
				  struct backref_edge, list[LOWER]);
		edges[idx - 1] = edge;
		*index = idx;
		return edge->node[UPPER];
	}
	*index = 0;
	return NULL;
}
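
/*
 * The two walkers above form an iterative depth-first traversal of the
 * backref graph: edges[] acts as an explicit stack of the edges taken on
 * the way up and *index is the current depth, so walk_down_backref() can
 * backtrack to the next unvisited reference path without recursion.
 */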

static void unlock_node_buffer(struct backref_node *node)
{
	if (node->locked) {
		btrfs_tree_unlock(node->eb);
		node->locked = 0;
	}
}

static void drop_node_buffer(struct backref_node *node)
{
	if (node->eb) {
		unlock_node_buffer(node);
		free_extent_buffer(node->eb);
		node->eb = NULL;
	}
}

static void drop_backref_node(struct backref_cache *tree,
			      struct backref_node *node)
{
	BUG_ON(!list_empty(&node->upper));

	drop_node_buffer(node);
	list_del(&node->list);
	list_del(&node->lower);
	if (!RB_EMPTY_NODE(&node->rb_node))
		rb_erase(&node->rb_node, &tree->rb_root);
	free_backref_node(tree, node);
}

/*
 * remove a backref node from the backref cache
 */
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node)
{
	struct backref_node *upper;
	struct backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		free_backref_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			drop_backref_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	drop_backref_node(cache, node);
}

static void update_backref_node(struct backref_cache *cache,
				struct backref_node *node, u64 bytenr)
{
	struct rb_node *rb_node;
	rb_erase(&node->rb_node, &cache->rb_root);
	node->bytenr = bytenr;
	rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
	if (rb_node)
		backref_tree_panic(rb_node, -EEXIST, bytenr);
}

/*
 * update backref cache after a transaction commit
 */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct backref_cache *cache)
{
	struct backref_node *node;
	int level = 0;

	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree,
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing of the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	cache->last_trans = 0;
	return 1;
}
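
/*
 * Note on the return value above: 0 means the cache is still valid for this
 * transaction; 1 means a commit has happened since the cache was filled, so
 * detached nodes were dropped and changed/pending nodes were re-indexed at
 * their new bytenr, and the caller knows the cached nodes have moved.
 */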

static int should_ignore_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	if (!root->ref_cows)
		return 0;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
	    root->fs_info->running_transaction->transid - 1)
		return 0;
	/*
	 * if there is a reloc tree and it was created in the previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
	return 1;
}
/*
 * find reloc tree by address of tree root
 */
static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
					  u64 bytenr)
{
	struct rb_node *rb_node;
	struct mapping_node *node;
	struct btrfs_root *root = NULL;

	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		root = (struct btrfs_root *)node->data;
	}
	spin_unlock(&rc->reloc_root_tree.lock);
	return root;
}

static int is_cowonly_root(u64 root_objectid)
{
	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
	    root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
	    root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
	    root_objectid == BTRFS_DEV_TREE_OBJECTID ||
	    root_objectid == BTRFS_TREE_LOG_OBJECTID ||
	    root_objectid == BTRFS_CSUM_TREE_OBJECTID)
		return 1;
	return 0;
}

static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_key key;

	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	if (is_cowonly_root(root_objectid))
		key.offset = 0;
	else
		key.offset = (u64)-1;

	return btrfs_read_fs_root_no_name(fs_info, &key);
}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static noinline_for_stack
struct btrfs_root *find_tree_root(struct reloc_control *rc,
				  struct extent_buffer *leaf,
				  struct btrfs_extent_ref_v0 *ref0)
{
	struct btrfs_root *root;
	u64 root_objectid = btrfs_ref_root_v0(leaf, ref0);
	u64 generation = btrfs_ref_generation_v0(leaf, ref0);

	BUG_ON(root_objectid == BTRFS_TREE_RELOC_OBJECTID);

	root = read_fs_root(rc->extent_root->fs_info, root_objectid);
	BUG_ON(IS_ERR(root));

	if (root->ref_cows &&
	    generation != btrfs_root_generation(&root->root_item))
		return NULL;

	return root;
}
#endif

static noinline_for_stack
int find_inline_backref(struct extent_buffer *leaf, int slot,
			unsigned long *ptr, unsigned long *end)
{
	struct btrfs_key key;
	struct btrfs_extent_item *ei;
	struct btrfs_tree_block_info *bi;
	u32 item_size;

	btrfs_item_key_to_cpu(leaf, &key, slot);

	item_size = btrfs_item_size_nr(leaf, slot);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
		return 1;
	}
#endif
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	WARN_ON(!(btrfs_extent_flags(leaf, ei) &
		  BTRFS_EXTENT_FLAG_TREE_BLOCK));

	if (key.type == BTRFS_EXTENT_ITEM_KEY &&
	    item_size <= sizeof(*ei) + sizeof(*bi)) {
		WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
		return 1;
	}

	if (key.type == BTRFS_EXTENT_ITEM_KEY) {
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		*ptr = (unsigned long)(bi + 1);
	} else {
		*ptr = (unsigned long)(ei + 1);
	}
	*end = (unsigned long)ei + item_size;
	return 0;
}

/*
 * build a backref tree for a given tree block. the root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when a tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
static noinline_for_stack
struct backref_node *build_backref_tree(struct reloc_control *rc,
					struct btrfs_key *node_key,
					int level, u64 bytenr)
{
	struct backref_cache *cache = &rc->backref_cache;
	struct btrfs_path *path1;
	struct btrfs_path *path2;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct backref_node *cur;
	struct backref_node *upper;
	struct backref_node *lower;
	struct backref_node *node = NULL;
	struct backref_node *exist = NULL;
	struct backref_edge *edge;
	struct rb_node *rb_node;
	struct btrfs_key key;
	unsigned long end;
	unsigned long ptr;
	LIST_HEAD(list);
	LIST_HEAD(useless);
	int cowonly;
	int ret;
	int err = 0;

	path1 = btrfs_alloc_path();
	path2 = btrfs_alloc_path();
	if (!path1 || !path2) {
		err = -ENOMEM;
		goto out;
	}
	path1->reada = 1;
	path2->reada = 2;

	node = alloc_backref_node(cache);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->bytenr = bytenr;
	node->level = level;
	node->lowest = 1;
	cur = node;
again:
	end = 0;
	ptr = 0;
	key.objectid = cur->bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;

	path1->search_commit_root = 1;
	path1->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
				0, 0);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(!ret || !path1->slots[0]);

	path1->slots[0]--;

	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * the backref was added previously when processing
		 * backref of type BTRFS_TREE_BLOCK_REF_KEY
		 */
		BUG_ON(!list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct backref_edge,
				  list[LOWER]);
		BUG_ON(!list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * add the upper level block to the pending list if we need
		 * to check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
	} else {
		exist = NULL;
	}

	while (1) {
		cond_resched();
		eb = path1->nodes[0];

		if (ptr >= end) {
			if (path1->slots[0] >= btrfs_header_nritems(eb)) {
				ret = btrfs_next_leaf(rc->extent_root, path1);
				if (ret < 0) {
					err = ret;
					goto out;
				}
				if (ret > 0)
					break;
				eb = path1->nodes[0];
			}

			btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
			if (key.objectid != cur->bytenr) {
				WARN_ON(exist);
				break;
			}

			if (key.type == BTRFS_EXTENT_ITEM_KEY ||
			    key.type == BTRFS_METADATA_ITEM_KEY) {
				ret = find_inline_backref(eb, path1->slots[0],
							  &ptr, &end);
				if (ret)
					goto next;
			}
		}

		if (ptr < end) {
			/* update key for inline back ref */
			struct btrfs_extent_inline_ref *iref;
			iref = (struct btrfs_extent_inline_ref *)ptr;
			key.type = btrfs_extent_inline_ref_type(eb, iref);
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
			WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
				key.type != BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			goto next;
		}

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY ||
		    key.type == BTRFS_EXTENT_REF_V0_KEY) {
			if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
				struct btrfs_extent_ref_v0 *ref0;
				ref0 = btrfs_item_ptr(eb, path1->slots[0],
						struct btrfs_extent_ref_v0);
				if (key.objectid == key.offset) {
					root = find_tree_root(rc, eb, ref0);
					if (root && !should_ignore_root(root))
						cur->root = root;
					else
						list_add(&cur->list, &useless);
					break;
				}
				if (is_cowonly_root(btrfs_ref_root_v0(eb,
								      ref0)))
					cur->cowonly = 1;
			}
#else
		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
#endif
			if (key.objectid == key.offset) {
				/*
				 * only root blocks of reloc trees use
				 * backref of this type.
				 */
				root = find_reloc_root(rc, cur->bytenr);
				BUG_ON(!root);
				cur->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}
			rb_node = tree_search(&cache->rb_root, key.offset);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = key.offset;
				upper->level = cur->level + 1;
				/*
				 * backrefs for the upper level block aren't
				 * cached, add the block to the pending list
				 */
				list_add_tail(&edge->list[UPPER], &list);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
			list_add_tail(&edge->list[LOWER], &cur->upper);
			edge->node[LOWER] = cur;
			edge->node[UPPER] = upper;

			goto next;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			goto next;
		}

		/* key.type == BTRFS_TREE_BLOCK_REF_KEY */
		root = read_fs_root(rc->extent_root->fs_info, key.offset);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}

		if (!root->ref_cows)
			cur->cowonly = 1;

		if (btrfs_root_level(&root->root_item) == cur->level) {
			/* tree root */
			BUG_ON(btrfs_root_bytenr(&root->root_item) !=
			       cur->bytenr);
			if (should_ignore_root(root))
				list_add(&cur->list, &useless);
			else
				cur->root = root;
			break;
		}

		level = cur->level + 1;

		/*
		 * search the tree to find upper level blocks that
		 * reference the block.
		 */
		path2->search_commit_root = 1;
		path2->skip_locking = 1;
		path2->lowest_level = level;
		ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
		path2->lowest_level = 0;
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0 && path2->slots[level] > 0)
			path2->slots[level]--;

		eb = path2->nodes[level];
		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
			cur->bytenr);

		lower = cur;
		for (; level < BTRFS_MAX_LEVEL; level++) {
			if (!path2->nodes[level]) {
				BUG_ON(btrfs_root_bytenr(&root->root_item) !=
				       lower->bytenr);
				if (should_ignore_root(root))
					list_add(&lower->list, &useless);
				else
					lower->root = root;
				break;
			}

			edge = alloc_backref_edge(cache);
			if (!edge) {
				err = -ENOMEM;
				goto out;
			}

			eb = path2->nodes[level];
			rb_node = tree_search(&cache->rb_root, eb->start);
			if (!rb_node) {
				upper = alloc_backref_node(cache);
				if (!upper) {
					free_backref_edge(cache, edge);
					err = -ENOMEM;
					goto out;
				}
				upper->bytenr = eb->start;
				upper->owner = btrfs_header_owner(eb);
				upper->level = lower->level + 1;
				if (!root->ref_cows)
					upper->cowonly = 1;

				/*
				 * if we know the block isn't shared
				 * we can avoid checking its backrefs.
				 */
				if (btrfs_block_can_be_shared(root, eb))
					upper->checked = 0;
				else
					upper->checked = 1;

				/*
				 * add the block to the pending list if we
				 * need to check its backrefs. only blocks
				 * at 'cur->level + 1' are added to the
				 * tail of the pending list. this guarantees
				 * we check backrefs from lower level
				 * blocks to upper level blocks.
				 */
				if (!upper->checked &&
				    level == cur->level + 1) {
					list_add_tail(&edge->list[UPPER],
						      &list);
				} else
					INIT_LIST_HEAD(&edge->list[UPPER]);
			} else {
				upper = rb_entry(rb_node, struct backref_node,
						 rb_node);
				BUG_ON(!upper->checked);
				INIT_LIST_HEAD(&edge->list[UPPER]);
				if (!upper->owner)
					upper->owner = btrfs_header_owner(eb);
			}
			list_add_tail(&edge->list[LOWER], &lower->upper);
			edge->node[LOWER] = lower;
			edge->node[UPPER] = upper;

			if (rb_node)
				break;
			lower = upper;
			upper = NULL;
		}
		btrfs_release_path(path2);
next:
		if (ptr < end) {
			ptr += btrfs_extent_inline_ref_size(key.type);
			if (ptr >= end) {
				WARN_ON(ptr > end);
				ptr = 0;
				end = 0;
			}
		}
		if (ptr >= end)
			path1->slots[0]++;
	}
	btrfs_release_path(path1);

	cur->checked = 1;
	WARN_ON(exist);

	/* the pending list isn't empty, take the first block to process */
	if (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		cur = edge->node[UPPER];
		goto again;
	}

	/*
	 * everything went well, connect backref nodes and insert backref nodes
	 * into the cache.
	 */
	BUG_ON(!node->checked);
	cowonly = node->cowonly;
	if (!cowonly) {
		rb_node = tree_insert(&cache->rb_root, node->bytenr,
				      &node->rb_node);
		if (rb_node)
			backref_tree_panic(rb_node, -EEXIST, node->bytenr);
		list_add_tail(&node->lower, &cache->leaves);
	}

	list_for_each_entry(edge, &node->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &list);

	while (!list_empty(&list)) {
		edge = list_entry(list.next, struct backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);
			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
			continue;
		}

		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		BUG_ON(!upper->checked);
		BUG_ON(cowonly != upper->cowonly);
		if (!cowonly) {
			rb_node = tree_insert(&cache->rb_root, upper->bytenr,
					      &upper->rb_node);
			if (rb_node)
				backref_tree_panic(rb_node, -EEXIST,
						   upper->bytenr);
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &list);
	}
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
	while (!list_empty(&useless)) {
		upper = list_entry(useless.next, struct backref_node, list);
		list_del_init(&upper->list);
		BUG_ON(!list_empty(&upper->upper));
		if (upper == node)
			node = NULL;
		if (upper->lowest) {
			list_del_init(&upper->lower);
			upper->lowest = 0;
		}
		while (!list_empty(&upper->lower)) {
			edge = list_entry(upper->lower.next,
					  struct backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			free_backref_edge(cache, edge);

			if (list_empty(&lower->upper))
				list_add(&lower->list, &useless);
		}
		__mark_block_processed(rc, upper);
		if (upper->level > 0) {
			list_add(&upper->list, &cache->detached);
			upper->detached = 1;
		} else {
			rb_erase(&upper->rb_node, &cache->rb_root);
			free_backref_node(cache, upper);
		}
	}
out:
	btrfs_free_path(path1);
	btrfs_free_path(path2);
	if (err) {
		while (!list_empty(&useless)) {
			lower = list_entry(useless.next,
					   struct backref_node, upper);
			list_del_init(&lower->upper);
		}
		upper = node;
		INIT_LIST_HEAD(&list);
		while (upper) {
			if (RB_EMPTY_NODE(&upper->rb_node)) {
				list_splice_tail(&upper->upper, &list);
				free_backref_node(cache, upper);
			}

			if (list_empty(&list))
				break;

			edge = list_entry(list.next, struct backref_edge,
					  list[LOWER]);
			list_del(&edge->list[LOWER]);
			upper = edge->node[UPPER];
			free_backref_edge(cache, edge);
		}
		return ERR_PTR(err);
	}
	BUG_ON(node && node->detached);
	return node;
}
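
/*
 * Rough summary of build_backref_tree() above: starting from the target
 * block it repeatedly pulls the next unchecked block off 'list', scans its
 * backrefs in the commit roots, and links lower/upper nodes with
 * backref_edges. Once the pending list drains, the collected nodes are
 * inserted into the cache rb_root; nodes found to be unreachable from a
 * live root end up on 'useless', where leaf-level nodes are freed and
 * higher level nodes are kept but marked detached.
 */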
1138
3fd0a558
YZ
1139/*
1140 * helper to add backref node for the newly created snapshot.
1141 * the backref node is created by cloning backref node that
1142 * corresponds to root of source tree
1143 */
1144static int clone_backref_node(struct btrfs_trans_handle *trans,
1145 struct reloc_control *rc,
1146 struct btrfs_root *src,
1147 struct btrfs_root *dest)
1148{
1149 struct btrfs_root *reloc_root = src->reloc_root;
1150 struct backref_cache *cache = &rc->backref_cache;
1151 struct backref_node *node = NULL;
1152 struct backref_node *new_node;
1153 struct backref_edge *edge;
1154 struct backref_edge *new_edge;
1155 struct rb_node *rb_node;
1156
1157 if (cache->last_trans > 0)
1158 update_backref_cache(trans, cache);
1159
1160 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1161 if (rb_node) {
1162 node = rb_entry(rb_node, struct backref_node, rb_node);
1163 if (node->detached)
1164 node = NULL;
1165 else
1166 BUG_ON(node->new_bytenr != reloc_root->node->start);
1167 }
1168
1169 if (!node) {
1170 rb_node = tree_search(&cache->rb_root,
1171 reloc_root->commit_root->start);
1172 if (rb_node) {
1173 node = rb_entry(rb_node, struct backref_node,
1174 rb_node);
1175 BUG_ON(node->detached);
1176 }
1177 }
1178
1179 if (!node)
1180 return 0;
1181
1182 new_node = alloc_backref_node(cache);
1183 if (!new_node)
1184 return -ENOMEM;
1185
1186 new_node->bytenr = dest->node->start;
1187 new_node->level = node->level;
1188 new_node->lowest = node->lowest;
6848ad64 1189 new_node->checked = 1;
3fd0a558
YZ
1190 new_node->root = dest;
1191
1192 if (!node->lowest) {
1193 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1194 new_edge = alloc_backref_edge(cache);
1195 if (!new_edge)
1196 goto fail;
1197
1198 new_edge->node[UPPER] = new_node;
1199 new_edge->node[LOWER] = edge->node[LOWER];
1200 list_add_tail(&new_edge->list[UPPER],
1201 &new_node->lower);
1202 }
76b9e23d
MX
1203 } else {
1204 list_add_tail(&new_node->lower, &cache->leaves);
3fd0a558
YZ
1205 }
1206
1207 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1208 &new_node->rb_node);
43c04fb1
JM
1209 if (rb_node)
1210 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
3fd0a558
YZ
1211
1212 if (!new_node->lowest) {
1213 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1214 list_add_tail(&new_edge->list[LOWER],
1215 &new_edge->node[LOWER]->upper);
1216 }
1217 }
1218 return 0;
1219fail:
1220 while (!list_empty(&new_node->lower)) {
1221 new_edge = list_entry(new_node->lower.next,
1222 struct backref_edge, list[UPPER]);
1223 list_del(&new_edge->list[UPPER]);
1224 free_backref_edge(cache, new_edge);
1225 }
1226 free_backref_node(cache, new_node);
1227 return -ENOMEM;
1228}
1229
5d4f98a2
YZ
1230/*
1231 * helper to add 'address of tree root -> reloc tree' mapping
1232 */
ffd7b339 1233static int __must_check __add_reloc_root(struct btrfs_root *root)
5d4f98a2
YZ
1234{
1235 struct rb_node *rb_node;
1236 struct mapping_node *node;
1237 struct reloc_control *rc = root->fs_info->reloc_ctl;
1238
1239 node = kmalloc(sizeof(*node), GFP_NOFS);
ffd7b339
JM
1240 if (!node)
1241 return -ENOMEM;
5d4f98a2
YZ
1242
1243 node->bytenr = root->node->start;
1244 node->data = root;
1245
1246 spin_lock(&rc->reloc_root_tree.lock);
1247 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1248 node->bytenr, &node->rb_node);
1249 spin_unlock(&rc->reloc_root_tree.lock);
ffd7b339 1250 if (rb_node) {
ffd7b339
JM
1251 btrfs_panic(root->fs_info, -EEXIST, "Duplicate root found "
1252 "for start=%llu while inserting into relocation "
533574c6 1253 "tree\n", node->bytenr);
23291a04
DC
1254 kfree(node);
1255 return -EEXIST;
ffd7b339 1256 }
5d4f98a2
YZ
1257
1258 list_add_tail(&root->root_list, &rc->reloc_roots);
1259 return 0;
1260}
1261
1262/*
1263 * helper to update/delete the 'address of tree root -> reloc tree'
1264 * mapping
1265 */
1266static int __update_reloc_root(struct btrfs_root *root, int del)
1267{
1268 struct rb_node *rb_node;
1269 struct mapping_node *node = NULL;
1270 struct reloc_control *rc = root->fs_info->reloc_ctl;
1271
1272 spin_lock(&rc->reloc_root_tree.lock);
1273 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1274 root->commit_root->start);
1275 if (rb_node) {
1276 node = rb_entry(rb_node, struct mapping_node, rb_node);
1277 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1278 }
1279 spin_unlock(&rc->reloc_root_tree.lock);
1280
8f71f3e0
LB
1281 if (!node)
1282 return 0;
5d4f98a2
YZ
1283 BUG_ON((struct btrfs_root *)node->data != root);
1284
1285 if (!del) {
1286 spin_lock(&rc->reloc_root_tree.lock);
1287 node->bytenr = root->node->start;
1288 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1289 node->bytenr, &node->rb_node);
1290 spin_unlock(&rc->reloc_root_tree.lock);
43c04fb1
JM
1291 if (rb_node)
1292 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
5d4f98a2 1293 } else {
1daf3540 1294 spin_lock(&root->fs_info->trans_lock);
5d4f98a2 1295 list_del_init(&root->root_list);
1daf3540 1296 spin_unlock(&root->fs_info->trans_lock);
5d4f98a2
YZ
1297 kfree(node);
1298 }
1299 return 0;
1300}
1301
3fd0a558
YZ
1302static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1303 struct btrfs_root *root, u64 objectid)
5d4f98a2
YZ
1304{
1305 struct btrfs_root *reloc_root;
1306 struct extent_buffer *eb;
1307 struct btrfs_root_item *root_item;
1308 struct btrfs_key root_key;
1309 int ret;
1310
5d4f98a2
YZ
1311 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1312 BUG_ON(!root_item);
1313
1314 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1315 root_key.type = BTRFS_ROOT_ITEM_KEY;
3fd0a558 1316 root_key.offset = objectid;
5d4f98a2 1317
3fd0a558
YZ
1318 if (root->root_key.objectid == objectid) {
1319 /* called by btrfs_init_reloc_root */
1320 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1321 BTRFS_TREE_RELOC_OBJECTID);
1322 BUG_ON(ret);
1323
1324 btrfs_set_root_last_snapshot(&root->root_item,
1325 trans->transid - 1);
1326 } else {
1327 /*
1328 * called by btrfs_reloc_post_snapshot_hook.
1329 * the source tree is a reloc tree, all tree blocks
1330 * modified after it was created have RELOC flag
1331 * set in their headers. so it's OK to not update
1332 * the 'last_snapshot'.
1333 */
1334 ret = btrfs_copy_root(trans, root, root->node, &eb,
1335 BTRFS_TREE_RELOC_OBJECTID);
1336 BUG_ON(ret);
1337 }
5d4f98a2 1338
5d4f98a2 1339 memcpy(root_item, &root->root_item, sizeof(*root_item));
5d4f98a2
YZ
1340 btrfs_set_root_bytenr(root_item, eb->start);
1341 btrfs_set_root_level(root_item, btrfs_header_level(eb));
1342 btrfs_set_root_generation(root_item, trans->transid);
3fd0a558
YZ
1343
1344 if (root->root_key.objectid == objectid) {
1345 btrfs_set_root_refs(root_item, 0);
1346 memset(&root_item->drop_progress, 0,
1347 sizeof(struct btrfs_disk_key));
1348 root_item->drop_level = 0;
1349 }
5d4f98a2
YZ
1350
1351 btrfs_tree_unlock(eb);
1352 free_extent_buffer(eb);
1353
1354 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
1355 &root_key, root_item);
1356 BUG_ON(ret);
1357 kfree(root_item);
1358
1359 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
1360 &root_key);
1361 BUG_ON(IS_ERR(reloc_root));
1362 reloc_root->last_trans = trans->transid;
3fd0a558
YZ
1363 return reloc_root;
1364}
1365
1366/*
1367 * create reloc tree for a given fs tree. reloc tree is just a
1368 * snapshot of the fs tree with special root objectid.
1369 */
1370int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1371 struct btrfs_root *root)
1372{
1373 struct btrfs_root *reloc_root;
1374 struct reloc_control *rc = root->fs_info->reloc_ctl;
1375 int clear_rsv = 0;
ffd7b339 1376 int ret;
3fd0a558
YZ
1377
1378 if (root->reloc_root) {
1379 reloc_root = root->reloc_root;
1380 reloc_root->last_trans = trans->transid;
1381 return 0;
1382 }
1383
1384 if (!rc || !rc->create_reloc_tree ||
1385 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1386 return 0;
1387
1388 if (!trans->block_rsv) {
1389 trans->block_rsv = rc->block_rsv;
1390 clear_rsv = 1;
1391 }
1392 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1393 if (clear_rsv)
1394 trans->block_rsv = NULL;
5d4f98a2 1395
ffd7b339
JM
1396 ret = __add_reloc_root(reloc_root);
1397 BUG_ON(ret < 0);
5d4f98a2
YZ
1398 root->reloc_root = reloc_root;
1399 return 0;
1400}
1401
1402/*
1403 * update root item of reloc tree
1404 */
1405int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1406 struct btrfs_root *root)
1407{
1408 struct btrfs_root *reloc_root;
1409 struct btrfs_root_item *root_item;
1410 int del = 0;
1411 int ret;
1412
1413 if (!root->reloc_root)
7585717f 1414 goto out;
5d4f98a2
YZ
1415
1416 reloc_root = root->reloc_root;
1417 root_item = &reloc_root->root_item;
1418
3fd0a558
YZ
1419 if (root->fs_info->reloc_ctl->merge_reloc_tree &&
1420 btrfs_root_refs(root_item) == 0) {
5d4f98a2
YZ
1421 root->reloc_root = NULL;
1422 del = 1;
1423 }
1424
1425 __update_reloc_root(reloc_root, del);
1426
1427 if (reloc_root->commit_root != reloc_root->node) {
1428 btrfs_set_root_node(root_item, reloc_root->node);
1429 free_extent_buffer(reloc_root->commit_root);
1430 reloc_root->commit_root = btrfs_root_node(reloc_root);
1431 }
1432
1433 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1434 &reloc_root->root_key, root_item);
1435 BUG_ON(ret);
7585717f
CM
1436
1437out:
5d4f98a2
YZ
1438 return 0;
1439}
1440
1441/*
1442 * helper to find first cached inode with inode number >= objectid
1443 * in a subvolume
1444 */
1445static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1446{
1447 struct rb_node *node;
1448 struct rb_node *prev;
1449 struct btrfs_inode *entry;
1450 struct inode *inode;
1451
1452 spin_lock(&root->inode_lock);
1453again:
1454 node = root->inode_tree.rb_node;
1455 prev = NULL;
1456 while (node) {
1457 prev = node;
1458 entry = rb_entry(node, struct btrfs_inode, rb_node);
1459
33345d01 1460 if (objectid < btrfs_ino(&entry->vfs_inode))
5d4f98a2 1461 node = node->rb_left;
33345d01 1462 else if (objectid > btrfs_ino(&entry->vfs_inode))
5d4f98a2
YZ
1463 node = node->rb_right;
1464 else
1465 break;
1466 }
1467 if (!node) {
1468 while (prev) {
1469 entry = rb_entry(prev, struct btrfs_inode, rb_node);
33345d01 1470 if (objectid <= btrfs_ino(&entry->vfs_inode)) {
5d4f98a2
YZ
1471 node = prev;
1472 break;
1473 }
1474 prev = rb_next(prev);
1475 }
1476 }
1477 while (node) {
1478 entry = rb_entry(node, struct btrfs_inode, rb_node);
1479 inode = igrab(&entry->vfs_inode);
1480 if (inode) {
1481 spin_unlock(&root->inode_lock);
1482 return inode;
1483 }
1484
33345d01 1485 objectid = btrfs_ino(&entry->vfs_inode) + 1;
5d4f98a2
YZ
1486 if (cond_resched_lock(&root->inode_lock))
1487 goto again;
1488
1489 node = rb_next(node);
1490 }
1491 spin_unlock(&root->inode_lock);
1492 return NULL;
1493}
1494
1495static int in_block_group(u64 bytenr,
1496 struct btrfs_block_group_cache *block_group)
1497{
1498 if (bytenr >= block_group->key.objectid &&
1499 bytenr < block_group->key.objectid + block_group->key.offset)
1500 return 1;
1501 return 0;
1502}
1503
1504/*
1505 * get new location of data
1506 */
1507static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1508 u64 bytenr, u64 num_bytes)
1509{
1510 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1511 struct btrfs_path *path;
1512 struct btrfs_file_extent_item *fi;
1513 struct extent_buffer *leaf;
1514 int ret;
1515
1516 path = btrfs_alloc_path();
1517 if (!path)
1518 return -ENOMEM;
1519
1520 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
33345d01 1521 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
5d4f98a2
YZ
1522 bytenr, 0);
1523 if (ret < 0)
1524 goto out;
1525 if (ret > 0) {
1526 ret = -ENOENT;
1527 goto out;
1528 }
1529
1530 leaf = path->nodes[0];
1531 fi = btrfs_item_ptr(leaf, path->slots[0],
1532 struct btrfs_file_extent_item);
1533
1534 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1535 btrfs_file_extent_compression(leaf, fi) ||
1536 btrfs_file_extent_encryption(leaf, fi) ||
1537 btrfs_file_extent_other_encoding(leaf, fi));
1538
1539 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1540 ret = 1;
1541 goto out;
1542 }
1543
3fd0a558 1544 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5d4f98a2
YZ
1545 ret = 0;
1546out:
1547 btrfs_free_path(path);
1548 return ret;
1549}
1550
1551/*
1552 * update file extent items in the tree leaf to point to
1553 * the new locations.
1554 */
3fd0a558
YZ
1555static noinline_for_stack
1556int replace_file_extents(struct btrfs_trans_handle *trans,
1557 struct reloc_control *rc,
1558 struct btrfs_root *root,
1559 struct extent_buffer *leaf)
5d4f98a2
YZ
1560{
1561 struct btrfs_key key;
1562 struct btrfs_file_extent_item *fi;
1563 struct inode *inode = NULL;
5d4f98a2
YZ
1564 u64 parent;
1565 u64 bytenr;
3fd0a558 1566 u64 new_bytenr = 0;
5d4f98a2
YZ
1567 u64 num_bytes;
1568 u64 end;
1569 u32 nritems;
1570 u32 i;
1571 int ret;
1572 int first = 1;
1573 int dirty = 0;
1574
1575 if (rc->stage != UPDATE_DATA_PTRS)
1576 return 0;
1577
1578 /* reloc trees always use full backref */
1579 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1580 parent = leaf->start;
1581 else
1582 parent = 0;
1583
1584 nritems = btrfs_header_nritems(leaf);
1585 for (i = 0; i < nritems; i++) {
1586 cond_resched();
1587 btrfs_item_key_to_cpu(leaf, &key, i);
1588 if (key.type != BTRFS_EXTENT_DATA_KEY)
1589 continue;
1590 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1591 if (btrfs_file_extent_type(leaf, fi) ==
1592 BTRFS_FILE_EXTENT_INLINE)
1593 continue;
1594 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1595 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1596 if (bytenr == 0)
1597 continue;
1598 if (!in_block_group(bytenr, rc->block_group))
1599 continue;
1600
1601 /*
1602 * if we are modifying block in fs tree, wait for readpage
1603 * to complete and drop the extent cache
1604 */
1605 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5d4f98a2
YZ
1606 if (first) {
1607 inode = find_next_inode(root, key.objectid);
5d4f98a2 1608 first = 0;
33345d01 1609 } else if (inode && btrfs_ino(inode) < key.objectid) {
3fd0a558 1610 btrfs_add_delayed_iput(inode);
5d4f98a2 1611 inode = find_next_inode(root, key.objectid);
5d4f98a2 1612 }
33345d01 1613 if (inode && btrfs_ino(inode) == key.objectid) {
5d4f98a2
YZ
1614 end = key.offset +
1615 btrfs_file_extent_num_bytes(leaf, fi);
1616 WARN_ON(!IS_ALIGNED(key.offset,
1617 root->sectorsize));
1618 WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1619 end--;
1620 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
d0082371 1621 key.offset, end);
5d4f98a2
YZ
1622 if (!ret)
1623 continue;
1624
1625 btrfs_drop_extent_cache(inode, key.offset, end,
1626 1);
1627 unlock_extent(&BTRFS_I(inode)->io_tree,
d0082371 1628 key.offset, end);
5d4f98a2
YZ
1629 }
1630 }
1631
1632 ret = get_new_location(rc->data_inode, &new_bytenr,
1633 bytenr, num_bytes);
3fd0a558
YZ
1634 if (ret > 0) {
1635 WARN_ON(1);
5d4f98a2 1636 continue;
3fd0a558 1637 }
5d4f98a2
YZ
1638 BUG_ON(ret < 0);
1639
1640 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1641 dirty = 1;
1642
1643 key.offset -= btrfs_file_extent_offset(leaf, fi);
1644 ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
1645 num_bytes, parent,
1646 btrfs_header_owner(leaf),
66d7e7f0 1647 key.objectid, key.offset, 1);
5d4f98a2
YZ
1648 BUG_ON(ret);
1649
1650 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1651 parent, btrfs_header_owner(leaf),
66d7e7f0 1652 key.objectid, key.offset, 1);
5d4f98a2
YZ
1653 BUG_ON(ret);
1654 }
1655 if (dirty)
1656 btrfs_mark_buffer_dirty(leaf);
3fd0a558
YZ
1657 if (inode)
1658 btrfs_add_delayed_iput(inode);
5d4f98a2
YZ
1659 return 0;
1660}
1661
1662static noinline_for_stack
1663int memcmp_node_keys(struct extent_buffer *eb, int slot,
1664 struct btrfs_path *path, int level)
1665{
1666 struct btrfs_disk_key key1;
1667 struct btrfs_disk_key key2;
1668 btrfs_node_key(eb, &key1, slot);
1669 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1670 return memcmp(&key1, &key2, sizeof(key1));
1671}
1672
1673/*
1674 * try to replace tree blocks in fs tree with the new blocks
1675 * in reloc tree. tree blocks haven't been modified since the
1676 * reloc tree was create can be replaced.
1677 *
1678 * if a block was replaced, level of the block + 1 is returned.
1679 * if no block got replaced, 0 is returned. if there are other
1680 * errors, a negative error number is returned.
1681 */
3fd0a558
YZ
1682static noinline_for_stack
1683int replace_path(struct btrfs_trans_handle *trans,
1684 struct btrfs_root *dest, struct btrfs_root *src,
1685 struct btrfs_path *path, struct btrfs_key *next_key,
1686 int lowest_level, int max_level)
5d4f98a2
YZ
1687{
1688 struct extent_buffer *eb;
1689 struct extent_buffer *parent;
1690 struct btrfs_key key;
1691 u64 old_bytenr;
1692 u64 new_bytenr;
1693 u64 old_ptr_gen;
1694 u64 new_ptr_gen;
1695 u64 last_snapshot;
1696 u32 blocksize;
3fd0a558 1697 int cow = 0;
5d4f98a2
YZ
1698 int level;
1699 int ret;
1700 int slot;
1701
1702 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1703 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
5d4f98a2
YZ
1704
1705 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
3fd0a558 1706again:
5d4f98a2
YZ
1707 slot = path->slots[lowest_level];
1708 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1709
1710 eb = btrfs_lock_root_node(dest);
1711 btrfs_set_lock_blocking(eb);
1712 level = btrfs_header_level(eb);
1713
1714 if (level < lowest_level) {
1715 btrfs_tree_unlock(eb);
1716 free_extent_buffer(eb);
1717 return 0;
1718 }
1719
3fd0a558
YZ
1720 if (cow) {
1721 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1722 BUG_ON(ret);
1723 }
5d4f98a2
YZ
1724 btrfs_set_lock_blocking(eb);
1725
1726 if (next_key) {
1727 next_key->objectid = (u64)-1;
1728 next_key->type = (u8)-1;
1729 next_key->offset = (u64)-1;
1730 }
1731
1732 parent = eb;
1733 while (1) {
1734 level = btrfs_header_level(parent);
1735 BUG_ON(level < lowest_level);
1736
1737 ret = btrfs_bin_search(parent, &key, level, &slot);
1738 if (ret && slot > 0)
1739 slot--;
1740
1741 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1742 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1743
1744 old_bytenr = btrfs_node_blockptr(parent, slot);
1745 blocksize = btrfs_level_size(dest, level - 1);
1746 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1747
1748 if (level <= max_level) {
1749 eb = path->nodes[level];
1750 new_bytenr = btrfs_node_blockptr(eb,
1751 path->slots[level]);
1752 new_ptr_gen = btrfs_node_ptr_generation(eb,
1753 path->slots[level]);
1754 } else {
1755 new_bytenr = 0;
1756 new_ptr_gen = 0;
1757 }
1758
1759 if (new_bytenr > 0 && new_bytenr == old_bytenr) {
1760 WARN_ON(1);
1761 ret = level;
1762 break;
1763 }
1764
1765 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1766 memcmp_node_keys(parent, slot, path, level)) {
3fd0a558 1767 if (level <= lowest_level) {
5d4f98a2
YZ
1768 ret = 0;
1769 break;
1770 }
1771
1772 eb = read_tree_block(dest, old_bytenr, blocksize,
1773 old_ptr_gen);
97d9a8a4 1774 BUG_ON(!eb);
5d4f98a2 1775 btrfs_tree_lock(eb);
3fd0a558
YZ
1776 if (cow) {
1777 ret = btrfs_cow_block(trans, dest, eb, parent,
1778 slot, &eb);
1779 BUG_ON(ret);
5d4f98a2 1780 }
3fd0a558 1781 btrfs_set_lock_blocking(eb);
5d4f98a2
YZ
1782
1783 btrfs_tree_unlock(parent);
1784 free_extent_buffer(parent);
1785
1786 parent = eb;
1787 continue;
1788 }
1789
3fd0a558
YZ
1790 if (!cow) {
1791 btrfs_tree_unlock(parent);
1792 free_extent_buffer(parent);
1793 cow = 1;
1794 goto again;
1795 }
1796
5d4f98a2
YZ
1797 btrfs_node_key_to_cpu(path->nodes[level], &key,
1798 path->slots[level]);
b3b4aa74 1799 btrfs_release_path(path);
5d4f98a2
YZ
1800
1801 path->lowest_level = level;
1802 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1803 path->lowest_level = 0;
1804 BUG_ON(ret);
1805
1806 /*
1807 * swap blocks in fs tree and reloc tree.
1808 */
1809 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1810 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1811 btrfs_mark_buffer_dirty(parent);
1812
1813 btrfs_set_node_blockptr(path->nodes[level],
1814 path->slots[level], old_bytenr);
1815 btrfs_set_node_ptr_generation(path->nodes[level],
1816 path->slots[level], old_ptr_gen);
1817 btrfs_mark_buffer_dirty(path->nodes[level]);
1818
1819 ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
1820 path->nodes[level]->start,
66d7e7f0
AJ
1821 src->root_key.objectid, level - 1, 0,
1822 1);
5d4f98a2
YZ
1823 BUG_ON(ret);
1824 ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
1825 0, dest->root_key.objectid, level - 1,
66d7e7f0 1826 0, 1);
5d4f98a2
YZ
1827 BUG_ON(ret);
1828
1829 ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
1830 path->nodes[level]->start,
66d7e7f0
AJ
1831 src->root_key.objectid, level - 1, 0,
1832 1);
5d4f98a2
YZ
1833 BUG_ON(ret);
1834
1835 ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
1836 0, dest->root_key.objectid, level - 1,
66d7e7f0 1837 0, 1);
5d4f98a2
YZ
1838 BUG_ON(ret);
1839
1840 btrfs_unlock_up_safe(path, 0);
1841
1842 ret = level;
1843 break;
1844 }
1845 btrfs_tree_unlock(parent);
1846 free_extent_buffer(parent);
1847 return ret;
1848}
1849
1850/*
1851 * helper to find next relocated block in reloc tree
1852 */
1853static noinline_for_stack
1854int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1855 int *level)
1856{
1857 struct extent_buffer *eb;
1858 int i;
1859 u64 last_snapshot;
1860 u32 nritems;
1861
1862 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1863
1864 for (i = 0; i < *level; i++) {
1865 free_extent_buffer(path->nodes[i]);
1866 path->nodes[i] = NULL;
1867 }
1868
1869 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1870 eb = path->nodes[i];
1871 nritems = btrfs_header_nritems(eb);
1872 while (path->slots[i] + 1 < nritems) {
1873 path->slots[i]++;
1874 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1875 last_snapshot)
1876 continue;
1877
1878 *level = i;
1879 return 0;
1880 }
1881 free_extent_buffer(path->nodes[i]);
1882 path->nodes[i] = NULL;
1883 }
1884 return 1;
1885}
1886
1887/*
1888 * walk down reloc tree to find relocated block of lowest level
1889 */
1890static noinline_for_stack
1891int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1892 int *level)
1893{
1894 struct extent_buffer *eb = NULL;
1895 int i;
1896 u64 bytenr;
1897 u64 ptr_gen = 0;
1898 u64 last_snapshot;
1899 u32 blocksize;
1900 u32 nritems;
1901
1902 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1903
1904 for (i = *level; i > 0; i--) {
1905 eb = path->nodes[i];
1906 nritems = btrfs_header_nritems(eb);
1907 while (path->slots[i] < nritems) {
1908 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
1909 if (ptr_gen > last_snapshot)
1910 break;
1911 path->slots[i]++;
1912 }
1913 if (path->slots[i] >= nritems) {
1914 if (i == *level)
1915 break;
1916 *level = i + 1;
1917 return 0;
1918 }
1919 if (i == 1) {
1920 *level = i;
1921 return 0;
1922 }
1923
1924 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
1925 blocksize = btrfs_level_size(root, i - 1);
1926 eb = read_tree_block(root, bytenr, blocksize, ptr_gen);
1927 BUG_ON(btrfs_header_level(eb) != i - 1);
1928 path->nodes[i - 1] = eb;
1929 path->slots[i - 1] = 0;
1930 }
1931 return 1;
1932}
1933
1934/*
1935 * invalidate extent cache for file extents whose key in range of
1936 * [min_key, max_key)
1937 */
1938static int invalidate_extent_cache(struct btrfs_root *root,
1939 struct btrfs_key *min_key,
1940 struct btrfs_key *max_key)
1941{
1942 struct inode *inode = NULL;
1943 u64 objectid;
1944 u64 start, end;
33345d01 1945 u64 ino;
5d4f98a2
YZ
1946
1947 objectid = min_key->objectid;
1948 while (1) {
1949 cond_resched();
1950 iput(inode);
1951
1952 if (objectid > max_key->objectid)
1953 break;
1954
1955 inode = find_next_inode(root, objectid);
1956 if (!inode)
1957 break;
33345d01 1958 ino = btrfs_ino(inode);
5d4f98a2 1959
33345d01 1960 if (ino > max_key->objectid) {
5d4f98a2
YZ
1961 iput(inode);
1962 break;
1963 }
1964
33345d01 1965 objectid = ino + 1;
5d4f98a2
YZ
1966 if (!S_ISREG(inode->i_mode))
1967 continue;
1968
33345d01 1969 if (unlikely(min_key->objectid == ino)) {
5d4f98a2
YZ
1970 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
1971 continue;
1972 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
1973 start = 0;
1974 else {
1975 start = min_key->offset;
1976 WARN_ON(!IS_ALIGNED(start, root->sectorsize));
1977 }
1978 } else {
1979 start = 0;
1980 }
1981
33345d01 1982 if (unlikely(max_key->objectid == ino)) {
5d4f98a2
YZ
1983 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
1984 continue;
1985 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
1986 end = (u64)-1;
1987 } else {
1988 if (max_key->offset == 0)
1989 continue;
1990 end = max_key->offset;
1991 WARN_ON(!IS_ALIGNED(end, root->sectorsize));
1992 end--;
1993 }
1994 } else {
1995 end = (u64)-1;
1996 }
1997
1998 /* the lock_extent waits for readpage to complete */
d0082371 1999 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
5d4f98a2 2000 btrfs_drop_extent_cache(inode, start, end, 1);
d0082371 2001 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
5d4f98a2
YZ
2002 }
2003 return 0;
2004}
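
/*
 * The helper above walks the root's inodes by objectid (find_next_inode),
 * skips anything that is not a regular file, and drops the cached extent
 * mappings that fall inside [min_key, max_key).  Only the first and the
 * last inode of the range get a narrowed [start, end]; every inode in
 * between has its whole range (0 .. (u64)-1) invalidated.  Taking the
 * extent lock first makes the drop wait for any readpage in flight, as
 * noted inline.
 */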
2005
2006static int find_next_key(struct btrfs_path *path, int level,
2007 struct btrfs_key *key)
2008
2009{
2010 while (level < BTRFS_MAX_LEVEL) {
2011 if (!path->nodes[level])
2012 break;
2013 if (path->slots[level] + 1 <
2014 btrfs_header_nritems(path->nodes[level])) {
2015 btrfs_node_key_to_cpu(path->nodes[level], key,
2016 path->slots[level] + 1);
2017 return 0;
2018 }
2019 level++;
2020 }
2021 return 1;
2022}
2023
2024/*
2025 * merge the relocated tree blocks in the reloc tree with the corresponding
2026 * fs tree.
2027 */
2028static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2029 struct btrfs_root *root)
2030{
2031 LIST_HEAD(inode_list);
2032 struct btrfs_key key;
2033 struct btrfs_key next_key;
2034 struct btrfs_trans_handle *trans;
2035 struct btrfs_root *reloc_root;
2036 struct btrfs_root_item *root_item;
2037 struct btrfs_path *path;
3fd0a558 2038 struct extent_buffer *leaf;
5d4f98a2
YZ
2039 int level;
2040 int max_level;
2041 int replaced = 0;
2042 int ret;
2043 int err = 0;
3fd0a558 2044 u32 min_reserved;
5d4f98a2
YZ
2045
2046 path = btrfs_alloc_path();
2047 if (!path)
2048 return -ENOMEM;
026fd317 2049 path->reada = 1;
5d4f98a2
YZ
2050
2051 reloc_root = root->reloc_root;
2052 root_item = &reloc_root->root_item;
2053
2054 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2055 level = btrfs_root_level(root_item);
2056 extent_buffer_get(reloc_root->node);
2057 path->nodes[level] = reloc_root->node;
2058 path->slots[level] = 0;
2059 } else {
2060 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2061
2062 level = root_item->drop_level;
2063 BUG_ON(level == 0);
2064 path->lowest_level = level;
2065 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
33c66f43 2066 path->lowest_level = 0;
5d4f98a2
YZ
2067 if (ret < 0) {
2068 btrfs_free_path(path);
2069 return ret;
2070 }
2071
2072 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2073 path->slots[level]);
2074 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2075
2076 btrfs_unlock_up_safe(path, 0);
2077 }
2078
3fd0a558
YZ
2079 min_reserved = root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2080 memset(&next_key, 0, sizeof(next_key));
5d4f98a2 2081
3fd0a558
YZ
2082 while (1) {
2083 trans = btrfs_start_transaction(root, 0);
98d5dc13 2084 BUG_ON(IS_ERR(trans));
3fd0a558 2085 trans->block_rsv = rc->block_rsv;
5d4f98a2 2086
08e007d2
MX
2087 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2088 BTRFS_RESERVE_FLUSH_ALL);
3fd0a558
YZ
2089 if (ret) {
2090 BUG_ON(ret != -EAGAIN);
2091 ret = btrfs_commit_transaction(trans, root);
2092 BUG_ON(ret);
2093 continue;
5d4f98a2
YZ
2094 }
2095
5d4f98a2 2096 replaced = 0;
5d4f98a2
YZ
2097 max_level = level;
2098
2099 ret = walk_down_reloc_tree(reloc_root, path, &level);
2100 if (ret < 0) {
2101 err = ret;
2102 goto out;
2103 }
2104 if (ret > 0)
2105 break;
2106
2107 if (!find_next_key(path, level, &key) &&
2108 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2109 ret = 0;
5d4f98a2 2110 } else {
3fd0a558
YZ
2111 ret = replace_path(trans, root, reloc_root, path,
2112 &next_key, level, max_level);
5d4f98a2
YZ
2113 }
2114 if (ret < 0) {
2115 err = ret;
2116 goto out;
2117 }
2118
2119 if (ret > 0) {
2120 level = ret;
2121 btrfs_node_key_to_cpu(path->nodes[level], &key,
2122 path->slots[level]);
2123 replaced = 1;
5d4f98a2
YZ
2124 }
2125
2126 ret = walk_up_reloc_tree(reloc_root, path, &level);
2127 if (ret > 0)
2128 break;
2129
2130 BUG_ON(level == 0);
2131 /*
2132 * save the merging progress in the drop_progress.
2133 * this is OK since root refs == 1 in this case.
2134 */
2135 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2136 path->slots[level]);
2137 root_item->drop_level = level;
2138
3fd0a558 2139 btrfs_end_transaction_throttle(trans, root);
5d4f98a2 2140
b53d3f5d 2141 btrfs_btree_balance_dirty(root);
5d4f98a2
YZ
2142
2143 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2144 invalidate_extent_cache(root, &key, &next_key);
2145 }
2146
2147 /*
2148 * handle the case where only one block in the fs tree needs to be
2149 * relocated and that block is the tree root.
2150 */
2151 leaf = btrfs_lock_root_node(root);
2152 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2153 btrfs_tree_unlock(leaf);
2154 free_extent_buffer(leaf);
2155 if (ret < 0)
2156 err = ret;
2157out:
2158 btrfs_free_path(path);
2159
2160 if (err == 0) {
2161 memset(&root_item->drop_progress, 0,
2162 sizeof(root_item->drop_progress));
2163 root_item->drop_level = 0;
2164 btrfs_set_root_refs(root_item, 0);
3fd0a558 2165 btrfs_update_reloc_root(trans, root);
5d4f98a2
YZ
2166 }
2167
3fd0a558 2168 btrfs_end_transaction_throttle(trans, root);
5d4f98a2 2169
b53d3f5d 2170 btrfs_btree_balance_dirty(root);
5d4f98a2 2171
5d4f98a2
YZ
2172 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2173 invalidate_extent_cache(root, &key, &next_key);
2174
2175 return err;
2176}
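
/*
 * Progress of the merge above is persisted: after each transaction the
 * current key is written into the reloc root's drop_progress/drop_level,
 * which is exactly what the top of the function reads back to resume.
 * Once the merge finishes cleanly, drop_progress is cleared and the
 * root_item's ref count is set to 0 to mark the reloc tree as fully
 * merged; merge_reloc_roots() below then frees it via
 * btrfs_drop_snapshot().
 */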
2177
3fd0a558
YZ
2178static noinline_for_stack
2179int prepare_to_merge(struct reloc_control *rc, int err)
5d4f98a2 2180{
3fd0a558 2181 struct btrfs_root *root = rc->extent_root;
5d4f98a2 2182 struct btrfs_root *reloc_root;
3fd0a558
YZ
2183 struct btrfs_trans_handle *trans;
2184 LIST_HEAD(reloc_roots);
2185 u64 num_bytes = 0;
2186 int ret;
3fd0a558 2187
7585717f 2188 mutex_lock(&root->fs_info->reloc_mutex);
3fd0a558
YZ
2189 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2190 rc->merging_rsv_size += rc->nodes_relocated * 2;
7585717f
CM
2191 mutex_unlock(&root->fs_info->reloc_mutex);
2192
3fd0a558
YZ
2193again:
2194 if (!err) {
2195 num_bytes = rc->merging_rsv_size;
08e007d2
MX
2196 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2197 BTRFS_RESERVE_FLUSH_ALL);
3fd0a558
YZ
2198 if (ret)
2199 err = ret;
2200 }
2201
7a7eaa40 2202 trans = btrfs_join_transaction(rc->extent_root);
3612b495
TI
2203 if (IS_ERR(trans)) {
2204 if (!err)
2205 btrfs_block_rsv_release(rc->extent_root,
2206 rc->block_rsv, num_bytes);
2207 return PTR_ERR(trans);
2208 }
3fd0a558
YZ
2209
2210 if (!err) {
2211 if (num_bytes != rc->merging_rsv_size) {
2212 btrfs_end_transaction(trans, rc->extent_root);
2213 btrfs_block_rsv_release(rc->extent_root,
2214 rc->block_rsv, num_bytes);
3fd0a558
YZ
2215 goto again;
2216 }
2217 }
5d4f98a2 2218
3fd0a558
YZ
2219 rc->merge_reloc_tree = 1;
2220
2221 while (!list_empty(&rc->reloc_roots)) {
2222 reloc_root = list_entry(rc->reloc_roots.next,
2223 struct btrfs_root, root_list);
2224 list_del_init(&reloc_root->root_list);
5d4f98a2 2225
5d4f98a2
YZ
2226 root = read_fs_root(reloc_root->fs_info,
2227 reloc_root->root_key.offset);
2228 BUG_ON(IS_ERR(root));
2229 BUG_ON(root->reloc_root != reloc_root);
2230
3fd0a558
YZ
2231 /*
2232 * set reference count to 1, so btrfs_recover_relocation
2233 * knows it should resume merging
2234 */
2235 if (!err)
2236 btrfs_set_root_refs(&reloc_root->root_item, 1);
5d4f98a2 2237 btrfs_update_reloc_root(trans, root);
5d4f98a2 2238
3fd0a558
YZ
2239 list_add(&reloc_root->root_list, &reloc_roots);
2240 }
5d4f98a2 2241
3fd0a558 2242 list_splice(&reloc_roots, &rc->reloc_roots);
5d4f98a2 2243
3fd0a558
YZ
2244 if (!err)
2245 btrfs_commit_transaction(trans, rc->extent_root);
2246 else
2247 btrfs_end_transaction(trans, rc->extent_root);
2248 return err;
5d4f98a2
YZ
2249}
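
/*
 * Note on the retry above: num_bytes is sampled from merging_rsv_size
 * (updated under reloc_mutex) before the reservation is made.  After
 * joining the transaction the sampled value is compared against the
 * current merging_rsv_size again; if it changed in the meantime the
 * reservation is released and the step is retried ("goto again") with the
 * new size.  Only then is merge_reloc_tree set and every reloc root's
 * root_item refs set to 1 so that recovery knows merging has started.
 */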
2250
aca1bba6
LB
2251static noinline_for_stack
2252void free_reloc_roots(struct list_head *list)
2253{
2254 struct btrfs_root *reloc_root;
2255
2256 while (!list_empty(list)) {
2257 reloc_root = list_entry(list->next, struct btrfs_root,
2258 root_list);
2259 __update_reloc_root(reloc_root, 1);
2260 free_extent_buffer(reloc_root->node);
2261 free_extent_buffer(reloc_root->commit_root);
2262 kfree(reloc_root);
2263 }
2264}
2265
3fd0a558
YZ
2266static noinline_for_stack
2267int merge_reloc_roots(struct reloc_control *rc)
5d4f98a2 2268{
5d4f98a2 2269 struct btrfs_root *root;
3fd0a558
YZ
2270 struct btrfs_root *reloc_root;
2271 LIST_HEAD(reloc_roots);
2272 int found = 0;
aca1bba6 2273 int ret = 0;
3fd0a558
YZ
2274again:
2275 root = rc->extent_root;
7585717f
CM
2276
2277 /*
2278 * this serializes us with btrfs_record_root_in_transaction;
2279 * we have to make sure nobody is in the middle of
2280 * adding their roots to the list while we are
2281 * doing this splice
2282 */
2283 mutex_lock(&root->fs_info->reloc_mutex);
3fd0a558 2284 list_splice_init(&rc->reloc_roots, &reloc_roots);
7585717f 2285 mutex_unlock(&root->fs_info->reloc_mutex);
5d4f98a2 2286
3fd0a558
YZ
2287 while (!list_empty(&reloc_roots)) {
2288 found = 1;
2289 reloc_root = list_entry(reloc_roots.next,
2290 struct btrfs_root, root_list);
5d4f98a2 2291
3fd0a558
YZ
2292 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2293 root = read_fs_root(reloc_root->fs_info,
2294 reloc_root->root_key.offset);
2295 BUG_ON(IS_ERR(root));
2296 BUG_ON(root->reloc_root != reloc_root);
5d4f98a2 2297
3fd0a558 2298 ret = merge_reloc_root(rc, root);
aca1bba6
LB
2299 if (ret)
2300 goto out;
3fd0a558
YZ
2301 } else {
2302 list_del_init(&reloc_root->root_list);
2303 }
2c536799 2304 ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
aca1bba6
LB
2305 if (ret < 0) {
2306 if (list_empty(&reloc_root->root_list))
2307 list_add_tail(&reloc_root->root_list,
2308 &reloc_roots);
2309 goto out;
2310 }
5d4f98a2
YZ
2311 }
2312
3fd0a558
YZ
2313 if (found) {
2314 found = 0;
2315 goto again;
2316 }
aca1bba6
LB
2317out:
2318 if (ret) {
2319 btrfs_std_error(root->fs_info, ret);
2320 if (!list_empty(&reloc_roots))
2321 free_reloc_roots(&reloc_roots);
2322 }
2323
5d4f98a2 2324 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
aca1bba6 2325 return ret;
5d4f98a2
YZ
2326}
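
/*
 * Shape of merge_reloc_roots(): the reloc roots are spliced off rc under
 * reloc_mutex (see the comment above the splice), then each one is either
 * merged into its fs tree (root_item refs > 0) or simply unlinked, and is
 * finally handed to btrfs_drop_snapshot() to free the reloc tree itself.
 * Because the list can be repopulated while this runs, the whole pass
 * repeats ("goto again") until a pass finds the list empty.
 */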
2327
2328static void free_block_list(struct rb_root *blocks)
2329{
2330 struct tree_block *block;
2331 struct rb_node *rb_node;
2332 while ((rb_node = rb_first(blocks))) {
2333 block = rb_entry(rb_node, struct tree_block, rb_node);
2334 rb_erase(rb_node, blocks);
2335 kfree(block);
2336 }
2337}
2338
2339static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2340 struct btrfs_root *reloc_root)
2341{
2342 struct btrfs_root *root;
2343
2344 if (reloc_root->last_trans == trans->transid)
2345 return 0;
2346
2347 root = read_fs_root(reloc_root->fs_info, reloc_root->root_key.offset);
2348 BUG_ON(IS_ERR(root));
2349 BUG_ON(root->reloc_root != reloc_root);
2350
2351 return btrfs_record_root_in_trans(trans, root);
2352}
2353
3fd0a558
YZ
2354static noinline_for_stack
2355struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2356 struct reloc_control *rc,
2357 struct backref_node *node,
2358 struct backref_edge *edges[], int *nr)
5d4f98a2
YZ
2359{
2360 struct backref_node *next;
2361 struct btrfs_root *root;
3fd0a558
YZ
2362 int index = 0;
2363
5d4f98a2
YZ
2364 next = node;
2365 while (1) {
2366 cond_resched();
2367 next = walk_up_backref(next, edges, &index);
2368 root = next->root;
3fd0a558
YZ
2369 BUG_ON(!root);
2370 BUG_ON(!root->ref_cows);
5d4f98a2
YZ
2371
2372 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2373 record_reloc_root_in_trans(trans, root);
2374 break;
2375 }
2376
3fd0a558
YZ
2377 btrfs_record_root_in_trans(trans, root);
2378 root = root->reloc_root;
2379
2380 if (next->new_bytenr != root->node->start) {
2381 BUG_ON(next->new_bytenr);
2382 BUG_ON(!list_empty(&next->list));
2383 next->new_bytenr = root->node->start;
2384 next->root = root;
2385 list_add_tail(&next->list,
2386 &rc->backref_cache.changed);
2387 __mark_block_processed(rc, next);
5d4f98a2
YZ
2388 break;
2389 }
2390
3fd0a558 2391 WARN_ON(1);
5d4f98a2
YZ
2392 root = NULL;
2393 next = walk_down_backref(edges, &index);
2394 if (!next || next->level <= node->level)
2395 break;
2396 }
3fd0a558
YZ
2397 if (!root)
2398 return NULL;
5d4f98a2 2399
3fd0a558
YZ
2400 *nr = index;
2401 next = node;
2402 /* setup backref node path for btrfs_reloc_cow_block */
2403 while (1) {
2404 rc->backref_cache.path[next->level] = next;
2405 if (--index < 0)
2406 break;
2407 next = edges[index]->node[UPPER];
5d4f98a2 2408 }
5d4f98a2
YZ
2409 return root;
2410}
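
/*
 * select_reloc_root() walks up the backref edges from @node to a node that
 * has a root attached and records that root in the transaction.  Unless
 * the root is already a reloc tree root, it switches to root->reloc_root;
 * the first time a backref node is pointed at that reloc root its
 * new_bytenr/root are filled in, it is put on the "changed" list and
 * marked processed.  Finally the chain of nodes that was walked is stored
 * in rc->backref_cache.path[] so btrfs_reloc_cow_block() can find it
 * later.
 */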
2411
3fd0a558
YZ
2412/*
2413 * select a tree root for relocation. return NULL if the block
2414 * is reference counted. we should use do_relocation() in this
2415 * case. return a tree root pointer if the block isn't reference
2416 * counted. return -ENOENT if the block is the root of a reloc tree.
2417 */
5d4f98a2
YZ
2418static noinline_for_stack
2419struct btrfs_root *select_one_root(struct btrfs_trans_handle *trans,
2420 struct backref_node *node)
2421{
3fd0a558
YZ
2422 struct backref_node *next;
2423 struct btrfs_root *root;
2424 struct btrfs_root *fs_root = NULL;
5d4f98a2 2425 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
3fd0a558
YZ
2426 int index = 0;
2427
2428 next = node;
2429 while (1) {
2430 cond_resched();
2431 next = walk_up_backref(next, edges, &index);
2432 root = next->root;
2433 BUG_ON(!root);
2434
25985edc 2435 /* no other choice for a non-reference counted tree */
3fd0a558
YZ
2436 if (!root->ref_cows)
2437 return root;
2438
2439 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2440 fs_root = root;
2441
2442 if (next != node)
2443 return NULL;
2444
2445 next = walk_down_backref(edges, &index);
2446 if (!next || next->level <= node->level)
2447 break;
2448 }
2449
2450 if (!fs_root)
2451 return ERR_PTR(-ENOENT);
2452 return fs_root;
5d4f98a2
YZ
2453}
2454
2455static noinline_for_stack
3fd0a558
YZ
2456u64 calcu_metadata_size(struct reloc_control *rc,
2457 struct backref_node *node, int reserve)
5d4f98a2 2458{
3fd0a558
YZ
2459 struct backref_node *next = node;
2460 struct backref_edge *edge;
2461 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2462 u64 num_bytes = 0;
2463 int index = 0;
2464
2465 BUG_ON(reserve && node->processed);
2466
2467 while (next) {
2468 cond_resched();
2469 while (1) {
2470 if (next->processed && (reserve || next != node))
2471 break;
2472
2473 num_bytes += btrfs_level_size(rc->extent_root,
2474 next->level);
2475
2476 if (list_empty(&next->upper))
2477 break;
2478
2479 edge = list_entry(next->upper.next,
2480 struct backref_edge, list[LOWER]);
2481 edges[index++] = edge;
2482 next = edge->node[UPPER];
2483 }
2484 next = walk_down_backref(edges, &index);
2485 }
2486 return num_bytes;
5d4f98a2
YZ
2487}
2488
3fd0a558
YZ
2489static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2490 struct reloc_control *rc,
2491 struct backref_node *node)
5d4f98a2 2492{
3fd0a558
YZ
2493 struct btrfs_root *root = rc->extent_root;
2494 u64 num_bytes;
2495 int ret;
2496
2497 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
5d4f98a2 2498
3fd0a558 2499 trans->block_rsv = rc->block_rsv;
08e007d2
MX
2500 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2501 BTRFS_RESERVE_FLUSH_ALL);
3fd0a558
YZ
2502 if (ret) {
2503 if (ret == -EAGAIN)
2504 rc->commit_transaction = 1;
2505 return ret;
5d4f98a2 2506 }
3fd0a558 2507
3fd0a558
YZ
2508 return 0;
2509}
2510
2511static void release_metadata_space(struct reloc_control *rc,
2512 struct backref_node *node)
2513{
2514 u64 num_bytes = calcu_metadata_size(rc, node, 0) * 2;
2515 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, num_bytes);
5d4f98a2
YZ
2516}
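
/*
 * Reservation sizing: calcu_metadata_size() sums the tree block size of
 * @node and of every not-yet-processed block reachable through its upper
 * backref edges, and both callers double that sum.
 * reserve_metadata_space() charges the result to rc->block_rsv; if that
 * fails with -EAGAIN it sets rc->commit_transaction so the main loop
 * commits the transaction.
 */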
2517
2518/*
2519 * relocate a tree block, and then update pointers in upper level
2520 * blocks that reference the block to point to the new location.
2521 *
2522 * if called by link_to_upper, the block has already been relocated.
2523 * in that case this function just updates pointers.
2524 */
2525static int do_relocation(struct btrfs_trans_handle *trans,
3fd0a558 2526 struct reloc_control *rc,
5d4f98a2
YZ
2527 struct backref_node *node,
2528 struct btrfs_key *key,
2529 struct btrfs_path *path, int lowest)
2530{
2531 struct backref_node *upper;
2532 struct backref_edge *edge;
2533 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2534 struct btrfs_root *root;
2535 struct extent_buffer *eb;
2536 u32 blocksize;
2537 u64 bytenr;
2538 u64 generation;
2539 int nr;
2540 int slot;
2541 int ret;
2542 int err = 0;
2543
2544 BUG_ON(lowest && node->eb);
2545
2546 path->lowest_level = node->level + 1;
3fd0a558 2547 rc->backref_cache.path[node->level] = node;
5d4f98a2
YZ
2548 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2549 cond_resched();
5d4f98a2
YZ
2550
2551 upper = edge->node[UPPER];
3fd0a558
YZ
2552 root = select_reloc_root(trans, rc, upper, edges, &nr);
2553 BUG_ON(!root);
2554
2555 if (upper->eb && !upper->locked) {
2556 if (!lowest) {
2557 ret = btrfs_bin_search(upper->eb, key,
2558 upper->level, &slot);
2559 BUG_ON(ret);
2560 bytenr = btrfs_node_blockptr(upper->eb, slot);
2561 if (node->eb->start == bytenr)
2562 goto next;
2563 }
5d4f98a2 2564 drop_node_buffer(upper);
3fd0a558 2565 }
5d4f98a2
YZ
2566
2567 if (!upper->eb) {
2568 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2569 if (ret < 0) {
2570 err = ret;
2571 break;
2572 }
2573 BUG_ON(ret > 0);
2574
3fd0a558
YZ
2575 if (!upper->eb) {
2576 upper->eb = path->nodes[upper->level];
2577 path->nodes[upper->level] = NULL;
2578 } else {
2579 BUG_ON(upper->eb != path->nodes[upper->level]);
2580 }
5d4f98a2 2581
3fd0a558
YZ
2582 upper->locked = 1;
2583 path->locks[upper->level] = 0;
5d4f98a2 2584
3fd0a558 2585 slot = path->slots[upper->level];
b3b4aa74 2586 btrfs_release_path(path);
5d4f98a2
YZ
2587 } else {
2588 ret = btrfs_bin_search(upper->eb, key, upper->level,
2589 &slot);
2590 BUG_ON(ret);
2591 }
2592
2593 bytenr = btrfs_node_blockptr(upper->eb, slot);
3fd0a558
YZ
2594 if (lowest) {
2595 BUG_ON(bytenr != node->bytenr);
5d4f98a2 2596 } else {
3fd0a558
YZ
2597 if (node->eb->start == bytenr)
2598 goto next;
5d4f98a2
YZ
2599 }
2600
2601 blocksize = btrfs_level_size(root, node->level);
2602 generation = btrfs_node_ptr_generation(upper->eb, slot);
2603 eb = read_tree_block(root, bytenr, blocksize, generation);
97d9a8a4
TI
2604 if (!eb) {
2605 err = -EIO;
2606 goto next;
2607 }
5d4f98a2
YZ
2608 btrfs_tree_lock(eb);
2609 btrfs_set_lock_blocking(eb);
2610
2611 if (!node->eb) {
2612 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2613 slot, &eb);
3fd0a558
YZ
2614 btrfs_tree_unlock(eb);
2615 free_extent_buffer(eb);
5d4f98a2
YZ
2616 if (ret < 0) {
2617 err = ret;
3fd0a558 2618 goto next;
5d4f98a2 2619 }
3fd0a558 2620 BUG_ON(node->eb != eb);
5d4f98a2
YZ
2621 } else {
2622 btrfs_set_node_blockptr(upper->eb, slot,
2623 node->eb->start);
2624 btrfs_set_node_ptr_generation(upper->eb, slot,
2625 trans->transid);
2626 btrfs_mark_buffer_dirty(upper->eb);
2627
2628 ret = btrfs_inc_extent_ref(trans, root,
2629 node->eb->start, blocksize,
2630 upper->eb->start,
2631 btrfs_header_owner(upper->eb),
66d7e7f0 2632 node->level, 0, 1);
5d4f98a2
YZ
2633 BUG_ON(ret);
2634
2635 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2636 BUG_ON(ret);
5d4f98a2 2637 }
3fd0a558
YZ
2638next:
2639 if (!upper->pending)
2640 drop_node_buffer(upper);
2641 else
2642 unlock_node_buffer(upper);
2643 if (err)
2644 break;
5d4f98a2 2645 }
3fd0a558
YZ
2646
2647 if (!err && node->pending) {
2648 drop_node_buffer(node);
2649 list_move_tail(&node->list, &rc->backref_cache.changed);
2650 node->pending = 0;
2651 }
2652
5d4f98a2 2653 path->lowest_level = 0;
3fd0a558 2654 BUG_ON(err == -ENOSPC);
5d4f98a2
YZ
2655 return err;
2656}
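
/*
 * do_relocation() handles each upper-level parent of @node in one of two
 * ways.  If the block has not been COWed yet (node->eb is NULL, the
 * @lowest case used by relocate_tree_block()), the old child block is read
 * from disk and btrfs_cow_block() is called with the parent as the new
 * parent, which moves the block to its new location.  If the block was
 * already relocated (the link_to_upper() case for pending nodes), only the
 * parent's block pointer and generation are rewritten; a reference is
 * added for the new location and the old subtree is dropped with
 * btrfs_drop_subtree().
 */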
2657
2658static int link_to_upper(struct btrfs_trans_handle *trans,
3fd0a558 2659 struct reloc_control *rc,
5d4f98a2
YZ
2660 struct backref_node *node,
2661 struct btrfs_path *path)
2662{
2663 struct btrfs_key key;
5d4f98a2
YZ
2664
2665 btrfs_node_key_to_cpu(node->eb, &key, 0);
3fd0a558 2666 return do_relocation(trans, rc, node, &key, path, 0);
5d4f98a2
YZ
2667}
2668
2669static int finish_pending_nodes(struct btrfs_trans_handle *trans,
3fd0a558
YZ
2670 struct reloc_control *rc,
2671 struct btrfs_path *path, int err)
5d4f98a2 2672{
3fd0a558
YZ
2673 LIST_HEAD(list);
2674 struct backref_cache *cache = &rc->backref_cache;
5d4f98a2
YZ
2675 struct backref_node *node;
2676 int level;
2677 int ret;
5d4f98a2
YZ
2678
2679 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2680 while (!list_empty(&cache->pending[level])) {
2681 node = list_entry(cache->pending[level].next,
3fd0a558
YZ
2682 struct backref_node, list);
2683 list_move_tail(&node->list, &list);
2684 BUG_ON(!node->pending);
5d4f98a2 2685
3fd0a558
YZ
2686 if (!err) {
2687 ret = link_to_upper(trans, rc, node, path);
2688 if (ret < 0)
2689 err = ret;
2690 }
5d4f98a2 2691 }
3fd0a558 2692 list_splice_init(&list, &cache->pending[level]);
5d4f98a2 2693 }
5d4f98a2
YZ
2694 return err;
2695}
2696
2697static void mark_block_processed(struct reloc_control *rc,
3fd0a558
YZ
2698 u64 bytenr, u32 blocksize)
2699{
2700 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
2701 EXTENT_DIRTY, GFP_NOFS);
2702}
2703
2704static void __mark_block_processed(struct reloc_control *rc,
2705 struct backref_node *node)
5d4f98a2
YZ
2706{
2707 u32 blocksize;
2708 if (node->level == 0 ||
2709 in_block_group(node->bytenr, rc->block_group)) {
2710 blocksize = btrfs_level_size(rc->extent_root, node->level);
3fd0a558 2711 mark_block_processed(rc, node->bytenr, blocksize);
5d4f98a2
YZ
2712 }
2713 node->processed = 1;
2714}
2715
2716/*
2717 * mark a block and all blocks that directly/indirectly reference the block
2718 * as processed.
2719 */
2720static void update_processed_blocks(struct reloc_control *rc,
2721 struct backref_node *node)
2722{
2723 struct backref_node *next = node;
2724 struct backref_edge *edge;
2725 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2726 int index = 0;
2727
2728 while (next) {
2729 cond_resched();
2730 while (1) {
2731 if (next->processed)
2732 break;
2733
3fd0a558 2734 __mark_block_processed(rc, next);
5d4f98a2
YZ
2735
2736 if (list_empty(&next->upper))
2737 break;
2738
2739 edge = list_entry(next->upper.next,
2740 struct backref_edge, list[LOWER]);
2741 edges[index++] = edge;
2742 next = edge->node[UPPER];
2743 }
2744 next = walk_down_backref(edges, &index);
2745 }
2746}
2747
3fd0a558
YZ
2748static int tree_block_processed(u64 bytenr, u32 blocksize,
2749 struct reloc_control *rc)
2750{
2751 if (test_range_bit(&rc->processed_blocks, bytenr,
2752 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2753 return 1;
2754 return 0;
5d4f98a2
YZ
2755}
2756
2757static int get_tree_block_key(struct reloc_control *rc,
2758 struct tree_block *block)
2759{
2760 struct extent_buffer *eb;
2761
2762 BUG_ON(block->key_ready);
2763 eb = read_tree_block(rc->extent_root, block->bytenr,
2764 block->key.objectid, block->key.offset);
97d9a8a4 2765 BUG_ON(!eb);
5d4f98a2
YZ
2766 WARN_ON(btrfs_header_level(eb) != block->level);
2767 if (block->level == 0)
2768 btrfs_item_key_to_cpu(eb, &block->key, 0);
2769 else
2770 btrfs_node_key_to_cpu(eb, &block->key, 0);
2771 free_extent_buffer(eb);
2772 block->key_ready = 1;
2773 return 0;
2774}
2775
2776static int reada_tree_block(struct reloc_control *rc,
2777 struct tree_block *block)
2778{
2779 BUG_ON(block->key_ready);
3173a18f
JB
2780 if (block->key.type == BTRFS_METADATA_ITEM_KEY)
2781 readahead_tree_block(rc->extent_root, block->bytenr,
2782 block->key.objectid,
2783 rc->extent_root->leafsize);
2784 else
2785 readahead_tree_block(rc->extent_root, block->bytenr,
2786 block->key.objectid, block->key.offset);
5d4f98a2
YZ
2787 return 0;
2788}
2789
2790/*
2791 * helper function to relocate a tree block
2792 */
2793static int relocate_tree_block(struct btrfs_trans_handle *trans,
2794 struct reloc_control *rc,
2795 struct backref_node *node,
2796 struct btrfs_key *key,
2797 struct btrfs_path *path)
2798{
2799 struct btrfs_root *root;
3fd0a558
YZ
2800 int release = 0;
2801 int ret = 0;
2802
2803 if (!node)
2804 return 0;
5d4f98a2 2805
3fd0a558 2806 BUG_ON(node->processed);
5d4f98a2 2807 root = select_one_root(trans, node);
3fd0a558 2808 if (root == ERR_PTR(-ENOENT)) {
5d4f98a2 2809 update_processed_blocks(rc, node);
3fd0a558 2810 goto out;
5d4f98a2
YZ
2811 }
2812
3fd0a558
YZ
2813 if (!root || root->ref_cows) {
2814 ret = reserve_metadata_space(trans, rc, node);
2815 if (ret)
5d4f98a2 2816 goto out;
3fd0a558 2817 release = 1;
5d4f98a2
YZ
2818 }
2819
3fd0a558
YZ
2820 if (root) {
2821 if (root->ref_cows) {
2822 BUG_ON(node->new_bytenr);
2823 BUG_ON(!list_empty(&node->list));
2824 btrfs_record_root_in_trans(trans, root);
2825 root = root->reloc_root;
2826 node->new_bytenr = root->node->start;
2827 node->root = root;
2828 list_add_tail(&node->list, &rc->backref_cache.changed);
2829 } else {
2830 path->lowest_level = node->level;
2831 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
b3b4aa74 2832 btrfs_release_path(path);
3fd0a558
YZ
2833 if (ret > 0)
2834 ret = 0;
2835 }
2836 if (!ret)
2837 update_processed_blocks(rc, node);
2838 } else {
2839 ret = do_relocation(trans, rc, node, key, path, 1);
2840 }
5d4f98a2 2841out:
3fd0a558
YZ
2842 if (ret || node->level == 0 || node->cowonly) {
2843 if (release)
2844 release_metadata_space(rc, node);
2845 remove_backref_node(&rc->backref_cache, node);
2846 }
5d4f98a2
YZ
2847 return ret;
2848}
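
/*
 * relocate_tree_block() picks a strategy based on what select_one_root()
 * returns for the block:
 *   - ERR_PTR(-ENOENT): the block is the root of a reloc tree, so there is
 *     nothing to move; just mark it and its referencing blocks processed.
 *   - a root with ref_cows set: point the backref node at that root's
 *     reloc root (new_bytenr, changed list) so later COWs flow through it.
 *   - a root without ref_cows: COW the path down to the block in that tree
 *     directly via btrfs_search_slot() with cow == 1.
 *   - NULL (the block is shared): relocate it with do_relocation(),
 *     lowest == 1.
 * Metadata space is reserved up front for the ref_cows and NULL cases and
 * released again when the backref node is removed at out:.
 */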
2849
2850/*
2851 * relocate a list of blocks
2852 */
2853static noinline_for_stack
2854int relocate_tree_blocks(struct btrfs_trans_handle *trans,
2855 struct reloc_control *rc, struct rb_root *blocks)
2856{
5d4f98a2
YZ
2857 struct backref_node *node;
2858 struct btrfs_path *path;
2859 struct tree_block *block;
2860 struct rb_node *rb_node;
5d4f98a2
YZ
2861 int ret;
2862 int err = 0;
2863
2864 path = btrfs_alloc_path();
e1a12670
LB
2865 if (!path) {
2866 err = -ENOMEM;
2867 goto out_path;
2868 }
5d4f98a2 2869
5d4f98a2
YZ
2870 rb_node = rb_first(blocks);
2871 while (rb_node) {
2872 block = rb_entry(rb_node, struct tree_block, rb_node);
5d4f98a2
YZ
2873 if (!block->key_ready)
2874 reada_tree_block(rc, block);
2875 rb_node = rb_next(rb_node);
2876 }
2877
2878 rb_node = rb_first(blocks);
2879 while (rb_node) {
2880 block = rb_entry(rb_node, struct tree_block, rb_node);
2881 if (!block->key_ready)
2882 get_tree_block_key(rc, block);
2883 rb_node = rb_next(rb_node);
2884 }
2885
2886 rb_node = rb_first(blocks);
2887 while (rb_node) {
2888 block = rb_entry(rb_node, struct tree_block, rb_node);
2889
3fd0a558 2890 node = build_backref_tree(rc, &block->key,
5d4f98a2
YZ
2891 block->level, block->bytenr);
2892 if (IS_ERR(node)) {
2893 err = PTR_ERR(node);
2894 goto out;
2895 }
2896
2897 ret = relocate_tree_block(trans, rc, node, &block->key,
2898 path);
2899 if (ret < 0) {
3fd0a558
YZ
2900 if (ret != -EAGAIN || rb_node == rb_first(blocks))
2901 err = ret;
5d4f98a2
YZ
2902 goto out;
2903 }
5d4f98a2
YZ
2904 rb_node = rb_next(rb_node);
2905 }
5d4f98a2 2906out:
3fd0a558 2907 err = finish_pending_nodes(trans, rc, path, err);
5d4f98a2 2908
5d4f98a2 2909 btrfs_free_path(path);
e1a12670
LB
2910out_path:
2911 free_block_list(blocks);
5d4f98a2
YZ
2912 return err;
2913}
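
/*
 * relocate_tree_blocks() makes three passes over the rb-tree of blocks:
 * start readahead for every block whose key is not known yet, read those
 * blocks to fill in the keys, then build a backref tree for each block and
 * relocate it with relocate_tree_block().  Upper-level blocks left on the
 * pending lists are wired up to their parents in finish_pending_nodes()
 * before the path is freed.
 */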
2914
efa56464
YZ
2915static noinline_for_stack
2916int prealloc_file_extent_cluster(struct inode *inode,
2917 struct file_extent_cluster *cluster)
2918{
2919 u64 alloc_hint = 0;
2920 u64 start;
2921 u64 end;
2922 u64 offset = BTRFS_I(inode)->index_cnt;
2923 u64 num_bytes;
2924 int nr = 0;
2925 int ret = 0;
2926
2927 BUG_ON(cluster->start != cluster->boundary[0]);
2928 mutex_lock(&inode->i_mutex);
2929
2930 ret = btrfs_check_data_free_space(inode, cluster->end +
2931 1 - cluster->start);
2932 if (ret)
2933 goto out;
2934
2935 while (nr < cluster->nr) {
2936 start = cluster->boundary[nr] - offset;
2937 if (nr + 1 < cluster->nr)
2938 end = cluster->boundary[nr + 1] - 1 - offset;
2939 else
2940 end = cluster->end - offset;
2941
d0082371 2942 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
efa56464
YZ
2943 num_bytes = end + 1 - start;
2944 ret = btrfs_prealloc_file_range(inode, 0, start,
2945 num_bytes, num_bytes,
2946 end + 1, &alloc_hint);
d0082371 2947 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
efa56464
YZ
2948 if (ret)
2949 break;
2950 nr++;
2951 }
2952 btrfs_free_reserved_data_space(inode, cluster->end +
2953 1 - cluster->start);
2954out:
2955 mutex_unlock(&inode->i_mutex);
2956 return ret;
2957}
2958
5d4f98a2 2959static noinline_for_stack
0257bb82
YZ
2960int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
2961 u64 block_start)
2962{
2963 struct btrfs_root *root = BTRFS_I(inode)->root;
2964 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2965 struct extent_map *em;
2966 int ret = 0;
2967
172ddd60 2968 em = alloc_extent_map();
0257bb82
YZ
2969 if (!em)
2970 return -ENOMEM;
2971
2972 em->start = start;
2973 em->len = end + 1 - start;
2974 em->block_len = em->len;
2975 em->block_start = block_start;
2976 em->bdev = root->fs_info->fs_devices->latest_bdev;
2977 set_bit(EXTENT_FLAG_PINNED, &em->flags);
2978
d0082371 2979 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
0257bb82
YZ
2980 while (1) {
2981 write_lock(&em_tree->lock);
2982 ret = add_extent_mapping(em_tree, em);
2983 write_unlock(&em_tree->lock);
2984 if (ret != -EEXIST) {
2985 free_extent_map(em);
2986 break;
2987 }
2988 btrfs_drop_extent_cache(inode, start, end, 0);
2989 }
d0082371 2990 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
0257bb82
YZ
2991 return ret;
2992}
2993
2994static int relocate_file_extent_cluster(struct inode *inode,
2995 struct file_extent_cluster *cluster)
5d4f98a2
YZ
2996{
2997 u64 page_start;
2998 u64 page_end;
0257bb82
YZ
2999 u64 offset = BTRFS_I(inode)->index_cnt;
3000 unsigned long index;
5d4f98a2 3001 unsigned long last_index;
5d4f98a2
YZ
3002 struct page *page;
3003 struct file_ra_state *ra;
3b16a4e3 3004 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
0257bb82 3005 int nr = 0;
5d4f98a2
YZ
3006 int ret = 0;
3007
0257bb82
YZ
3008 if (!cluster->nr)
3009 return 0;
3010
5d4f98a2
YZ
3011 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3012 if (!ra)
3013 return -ENOMEM;
3014
efa56464
YZ
3015 ret = prealloc_file_extent_cluster(inode, cluster);
3016 if (ret)
3017 goto out;
0257bb82 3018
efa56464 3019 file_ra_state_init(ra, inode->i_mapping);
5d4f98a2 3020
0257bb82
YZ
3021 ret = setup_extent_mapping(inode, cluster->start - offset,
3022 cluster->end - offset, cluster->start);
5d4f98a2 3023 if (ret)
efa56464 3024 goto out;
5d4f98a2 3025
efa56464
YZ
3026 index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
3027 last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
0257bb82 3028 while (index <= last_index) {
efa56464
YZ
3029 ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
3030 if (ret)
3031 goto out;
3032
0257bb82 3033 page = find_lock_page(inode->i_mapping, index);
5d4f98a2 3034 if (!page) {
0257bb82
YZ
3035 page_cache_sync_readahead(inode->i_mapping,
3036 ra, NULL, index,
3037 last_index + 1 - index);
a94733d0 3038 page = find_or_create_page(inode->i_mapping, index,
3b16a4e3 3039 mask);
0257bb82 3040 if (!page) {
efa56464
YZ
3041 btrfs_delalloc_release_metadata(inode,
3042 PAGE_CACHE_SIZE);
0257bb82 3043 ret = -ENOMEM;
efa56464 3044 goto out;
0257bb82 3045 }
5d4f98a2 3046 }
0257bb82
YZ
3047
3048 if (PageReadahead(page)) {
3049 page_cache_async_readahead(inode->i_mapping,
3050 ra, NULL, page, index,
3051 last_index + 1 - index);
3052 }
3053
5d4f98a2
YZ
3054 if (!PageUptodate(page)) {
3055 btrfs_readpage(NULL, page);
3056 lock_page(page);
3057 if (!PageUptodate(page)) {
3058 unlock_page(page);
3059 page_cache_release(page);
efa56464
YZ
3060 btrfs_delalloc_release_metadata(inode,
3061 PAGE_CACHE_SIZE);
5d4f98a2 3062 ret = -EIO;
efa56464 3063 goto out;
5d4f98a2
YZ
3064 }
3065 }
5d4f98a2 3066
4eee4fa4 3067 page_start = page_offset(page);
5d4f98a2 3068 page_end = page_start + PAGE_CACHE_SIZE - 1;
0257bb82 3069
d0082371 3070 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
0257bb82 3071
5d4f98a2
YZ
3072 set_page_extent_mapped(page);
3073
0257bb82
YZ
3074 if (nr < cluster->nr &&
3075 page_start + offset == cluster->boundary[nr]) {
3076 set_extent_bits(&BTRFS_I(inode)->io_tree,
3077 page_start, page_end,
5d4f98a2 3078 EXTENT_BOUNDARY, GFP_NOFS);
0257bb82
YZ
3079 nr++;
3080 }
5d4f98a2 3081
efa56464 3082 btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
5d4f98a2 3083 set_page_dirty(page);
5d4f98a2 3084
0257bb82 3085 unlock_extent(&BTRFS_I(inode)->io_tree,
d0082371 3086 page_start, page_end);
5d4f98a2
YZ
3087 unlock_page(page);
3088 page_cache_release(page);
0257bb82
YZ
3089
3090 index++;
efa56464
YZ
3091 balance_dirty_pages_ratelimited(inode->i_mapping);
3092 btrfs_throttle(BTRFS_I(inode)->root);
5d4f98a2 3093 }
0257bb82 3094 WARN_ON(nr != cluster->nr);
efa56464 3095out:
5d4f98a2 3096 kfree(ra);
5d4f98a2
YZ
3097 return ret;
3098}
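
/*
 * Data relocation path in short: prealloc_file_extent_cluster() reserves
 * data space and preallocates one range per cluster boundary in the reloc
 * data inode (boundary bytenr minus BTRFS_I(inode)->index_cnt, the block
 * group start, gives the file offset), setup_extent_mapping() inserts a
 * pinned extent map pointing the whole cluster at its new block start, and
 * the page loop above reads every page of the range, marks it delalloc and
 * dirty, and sets EXTENT_BOUNDARY at each cluster boundary, presumably so
 * the original extent boundaries are preserved when the pages are written
 * back.
 */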
3099
3100static noinline_for_stack
0257bb82
YZ
3101int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3102 struct file_extent_cluster *cluster)
5d4f98a2 3103{
0257bb82 3104 int ret;
5d4f98a2 3105
0257bb82
YZ
3106 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3107 ret = relocate_file_extent_cluster(inode, cluster);
3108 if (ret)
3109 return ret;
3110 cluster->nr = 0;
5d4f98a2 3111 }
5d4f98a2 3112
0257bb82
YZ
3113 if (!cluster->nr)
3114 cluster->start = extent_key->objectid;
3115 else
3116 BUG_ON(cluster->nr >= MAX_EXTENTS);
3117 cluster->end = extent_key->objectid + extent_key->offset - 1;
3118 cluster->boundary[cluster->nr] = extent_key->objectid;
3119 cluster->nr++;
3120
3121 if (cluster->nr >= MAX_EXTENTS) {
3122 ret = relocate_file_extent_cluster(inode, cluster);
3123 if (ret)
3124 return ret;
3125 cluster->nr = 0;
3126 }
3127 return 0;
5d4f98a2
YZ
3128}
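
/*
 * relocate_data_extent() batches data extents into rc->cluster: as long as
 * each new extent starts right after the previous one (objectid ==
 * cluster->end + 1) it only records another boundary; otherwise, or once
 * MAX_EXTENTS boundaries have accumulated, the current cluster is flushed
 * through relocate_file_extent_cluster() and a new one is started.
 */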
3129
3130#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3131static int get_ref_objectid_v0(struct reloc_control *rc,
3132 struct btrfs_path *path,
3133 struct btrfs_key *extent_key,
3134 u64 *ref_objectid, int *path_change)
3135{
3136 struct btrfs_key key;
3137 struct extent_buffer *leaf;
3138 struct btrfs_extent_ref_v0 *ref0;
3139 int ret;
3140 int slot;
3141
3142 leaf = path->nodes[0];
3143 slot = path->slots[0];
3144 while (1) {
3145 if (slot >= btrfs_header_nritems(leaf)) {
3146 ret = btrfs_next_leaf(rc->extent_root, path);
3147 if (ret < 0)
3148 return ret;
3149 BUG_ON(ret > 0);
3150 leaf = path->nodes[0];
3151 slot = path->slots[0];
3152 if (path_change)
3153 *path_change = 1;
3154 }
3155 btrfs_item_key_to_cpu(leaf, &key, slot);
3156 if (key.objectid != extent_key->objectid)
3157 return -ENOENT;
3158
3159 if (key.type != BTRFS_EXTENT_REF_V0_KEY) {
3160 slot++;
3161 continue;
3162 }
3163 ref0 = btrfs_item_ptr(leaf, slot,
3164 struct btrfs_extent_ref_v0);
3165 *ref_objectid = btrfs_ref_objectid_v0(leaf, ref0);
3166 break;
3167 }
3168 return 0;
3169}
3170#endif
3171
3172/*
3173 * helper to add a tree block to the list.
3174 * the major work is getting the generation and level of the block
3175 */
3176static int add_tree_block(struct reloc_control *rc,
3177 struct btrfs_key *extent_key,
3178 struct btrfs_path *path,
3179 struct rb_root *blocks)
3180{
3181 struct extent_buffer *eb;
3182 struct btrfs_extent_item *ei;
3183 struct btrfs_tree_block_info *bi;
3184 struct tree_block *block;
3185 struct rb_node *rb_node;
3186 u32 item_size;
3187 int level = -1;
3188 int generation;
3189
3190 eb = path->nodes[0];
3191 item_size = btrfs_item_size_nr(eb, path->slots[0]);
3192
3173a18f
JB
3193 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3194 item_size >= sizeof(*ei) + sizeof(*bi)) {
5d4f98a2
YZ
3195 ei = btrfs_item_ptr(eb, path->slots[0],
3196 struct btrfs_extent_item);
3173a18f
JB
3197 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3198 bi = (struct btrfs_tree_block_info *)(ei + 1);
3199 level = btrfs_tree_block_level(eb, bi);
3200 } else {
3201 level = (int)extent_key->offset;
3202 }
5d4f98a2 3203 generation = btrfs_extent_generation(eb, ei);
5d4f98a2
YZ
3204 } else {
3205#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3206 u64 ref_owner;
3207 int ret;
3208
3209 BUG_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3210 ret = get_ref_objectid_v0(rc, path, extent_key,
3211 &ref_owner, NULL);
411fc6bc
AK
3212 if (ret < 0)
3213 return ret;
5d4f98a2
YZ
3214 BUG_ON(ref_owner >= BTRFS_MAX_LEVEL);
3215 level = (int)ref_owner;
3216 /* FIXME: get real generation */
3217 generation = 0;
3218#else
3219 BUG();
3220#endif
3221 }
3222
b3b4aa74 3223 btrfs_release_path(path);
5d4f98a2
YZ
3224
3225 BUG_ON(level == -1);
3226
3227 block = kmalloc(sizeof(*block), GFP_NOFS);
3228 if (!block)
3229 return -ENOMEM;
3230
3231 block->bytenr = extent_key->objectid;
3173a18f 3232 block->key.objectid = rc->extent_root->leafsize;
5d4f98a2
YZ
3233 block->key.offset = generation;
3234 block->level = level;
3235 block->key_ready = 0;
3236
3237 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
43c04fb1
JM
3238 if (rb_node)
3239 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
5d4f98a2
YZ
3240
3241 return 0;
3242}
3243
3244/*
3245 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3246 */
3247static int __add_tree_block(struct reloc_control *rc,
3248 u64 bytenr, u32 blocksize,
3249 struct rb_root *blocks)
3250{
3251 struct btrfs_path *path;
3252 struct btrfs_key key;
3253 int ret;
3254
3255 if (tree_block_processed(bytenr, blocksize, rc))
3256 return 0;
3257
3258 if (tree_search(blocks, bytenr))
3259 return 0;
3260
3261 path = btrfs_alloc_path();
3262 if (!path)
3263 return -ENOMEM;
3264
3265 key.objectid = bytenr;
3266 key.type = BTRFS_EXTENT_ITEM_KEY;
3267 key.offset = blocksize;
3268
3269 path->search_commit_root = 1;
3270 path->skip_locking = 1;
3271 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3272 if (ret < 0)
3273 goto out;
5d4f98a2
YZ
3274
3275 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
3173a18f
JB
3276 if (ret > 0) {
3277 if (key.objectid == bytenr &&
3278 key.type == BTRFS_METADATA_ITEM_KEY)
3279 ret = 0;
3280 }
3281 BUG_ON(ret);
3282
5d4f98a2
YZ
3283 ret = add_tree_block(rc, &key, path, blocks);
3284out:
3285 btrfs_free_path(path);
3286 return ret;
3287}
3288
3289/*
3290 * helper to check if the block uses full backrefs for pointers in it
3291 */
3292static int block_use_full_backref(struct reloc_control *rc,
3293 struct extent_buffer *eb)
3294{
5d4f98a2
YZ
3295 u64 flags;
3296 int ret;
3297
3298 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
3299 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
3300 return 1;
3301
3fd0a558 3302 ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
3173a18f
JB
3303 eb->start, btrfs_header_level(eb), 1,
3304 NULL, &flags);
5d4f98a2
YZ
3305 BUG_ON(ret);
3306
5d4f98a2
YZ
3307 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3308 ret = 1;
3309 else
3310 ret = 0;
5d4f98a2
YZ
3311 return ret;
3312}
3313
0af3d00b
JB
3314static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3315 struct inode *inode, u64 ino)
3316{
3317 struct btrfs_key key;
3318 struct btrfs_path *path;
3319 struct btrfs_root *root = fs_info->tree_root;
3320 struct btrfs_trans_handle *trans;
0af3d00b
JB
3321 int ret = 0;
3322
3323 if (inode)
3324 goto truncate;
3325
3326 key.objectid = ino;
3327 key.type = BTRFS_INODE_ITEM_KEY;
3328 key.offset = 0;
3329
3330 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
f54fb859
TI
3331 if (IS_ERR(inode) || is_bad_inode(inode)) {
3332 if (!IS_ERR(inode))
0af3d00b
JB
3333 iput(inode);
3334 return -ENOENT;
3335 }
3336
3337truncate:
3338 path = btrfs_alloc_path();
3339 if (!path) {
3340 ret = -ENOMEM;
3341 goto out;
3342 }
3343
7a7eaa40 3344 trans = btrfs_join_transaction(root);
0af3d00b
JB
3345 if (IS_ERR(trans)) {
3346 btrfs_free_path(path);
3612b495 3347 ret = PTR_ERR(trans);
0af3d00b
JB
3348 goto out;
3349 }
3350
3351 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
3352
3353 btrfs_free_path(path);
0af3d00b 3354 btrfs_end_transaction(trans, root);
b53d3f5d 3355 btrfs_btree_balance_dirty(root);
0af3d00b
JB
3356out:
3357 iput(inode);
3358 return ret;
3359}
3360
5d4f98a2
YZ
3361/*
3362 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3363 * this function scans the fs tree to find blocks that reference the data extent
3364 */
3365static int find_data_references(struct reloc_control *rc,
3366 struct btrfs_key *extent_key,
3367 struct extent_buffer *leaf,
3368 struct btrfs_extent_data_ref *ref,
3369 struct rb_root *blocks)
3370{
3371 struct btrfs_path *path;
3372 struct tree_block *block;
3373 struct btrfs_root *root;
3374 struct btrfs_file_extent_item *fi;
3375 struct rb_node *rb_node;
3376 struct btrfs_key key;
3377 u64 ref_root;
3378 u64 ref_objectid;
3379 u64 ref_offset;
3380 u32 ref_count;
3381 u32 nritems;
3382 int err = 0;
3383 int added = 0;
3384 int counted;
3385 int ret;
3386
5d4f98a2
YZ
3387 ref_root = btrfs_extent_data_ref_root(leaf, ref);
3388 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
3389 ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
3390 ref_count = btrfs_extent_data_ref_count(leaf, ref);
3391
0af3d00b
JB
3392 /*
3393 * This is an extent belonging to the free space cache; let's just delete
3394 * it and redo the search.
3395 */
3396 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
3397 ret = delete_block_group_cache(rc->extent_root->fs_info,
3398 NULL, ref_objectid);
3399 if (ret != -ENOENT)
3400 return ret;
3401 ret = 0;
3402 }
3403
3404 path = btrfs_alloc_path();
3405 if (!path)
3406 return -ENOMEM;
026fd317 3407 path->reada = 1;
0af3d00b 3408
5d4f98a2
YZ
3409 root = read_fs_root(rc->extent_root->fs_info, ref_root);
3410 if (IS_ERR(root)) {
3411 err = PTR_ERR(root);
3412 goto out;
3413 }
3414
3415 key.objectid = ref_objectid;
5d4f98a2 3416 key.type = BTRFS_EXTENT_DATA_KEY;
84850e8d
YZ
3417 if (ref_offset > ((u64)-1 << 32))
3418 key.offset = 0;
3419 else
3420 key.offset = ref_offset;
5d4f98a2
YZ
3421
3422 path->search_commit_root = 1;
3423 path->skip_locking = 1;
3424 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3425 if (ret < 0) {
3426 err = ret;
3427 goto out;
3428 }
3429
3430 leaf = path->nodes[0];
3431 nritems = btrfs_header_nritems(leaf);
3432 /*
3433 * the references in tree blocks that use full backrefs
3434 * are not counted here
3435 */
3436 if (block_use_full_backref(rc, leaf))
3437 counted = 0;
3438 else
3439 counted = 1;
3440 rb_node = tree_search(blocks, leaf->start);
3441 if (rb_node) {
3442 if (counted)
3443 added = 1;
3444 else
3445 path->slots[0] = nritems;
3446 }
3447
3448 while (ref_count > 0) {
3449 while (path->slots[0] >= nritems) {
3450 ret = btrfs_next_leaf(root, path);
3451 if (ret < 0) {
3452 err = ret;
3453 goto out;
3454 }
3455 if (ret > 0) {
3456 WARN_ON(1);
3457 goto out;
3458 }
3459
3460 leaf = path->nodes[0];
3461 nritems = btrfs_header_nritems(leaf);
3462 added = 0;
3463
3464 if (block_use_full_backref(rc, leaf))
3465 counted = 0;
3466 else
3467 counted = 1;
3468 rb_node = tree_search(blocks, leaf->start);
3469 if (rb_node) {
3470 if (counted)
3471 added = 1;
3472 else
3473 path->slots[0] = nritems;
3474 }
3475 }
3476
3477 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3478 if (key.objectid != ref_objectid ||
3479 key.type != BTRFS_EXTENT_DATA_KEY) {
3480 WARN_ON(1);
3481 break;
3482 }
3483
3484 fi = btrfs_item_ptr(leaf, path->slots[0],
3485 struct btrfs_file_extent_item);
3486
3487 if (btrfs_file_extent_type(leaf, fi) ==
3488 BTRFS_FILE_EXTENT_INLINE)
3489 goto next;
3490
3491 if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3492 extent_key->objectid)
3493 goto next;
3494
3495 key.offset -= btrfs_file_extent_offset(leaf, fi);
3496 if (key.offset != ref_offset)
3497 goto next;
3498
3499 if (counted)
3500 ref_count--;
3501 if (added)
3502 goto next;
3503
3504 if (!tree_block_processed(leaf->start, leaf->len, rc)) {
3505 block = kmalloc(sizeof(*block), GFP_NOFS);
3506 if (!block) {
3507 err = -ENOMEM;
3508 break;
3509 }
3510 block->bytenr = leaf->start;
3511 btrfs_item_key_to_cpu(leaf, &block->key, 0);
3512 block->level = 0;
3513 block->key_ready = 1;
3514 rb_node = tree_insert(blocks, block->bytenr,
3515 &block->rb_node);
43c04fb1
JM
3516 if (rb_node)
3517 backref_tree_panic(rb_node, -EEXIST,
3518 block->bytenr);
5d4f98a2
YZ
3519 }
3520 if (counted)
3521 added = 1;
3522 else
3523 path->slots[0] = nritems;
3524next:
3525 path->slots[0]++;
3526
3527 }
3528out:
3529 btrfs_free_path(path);
3530 return err;
3531}
3532
3533/*
2c016dc2 3534 * helper to find all tree blocks that reference a given data extent
5d4f98a2
YZ
3535 */
3536static noinline_for_stack
3537int add_data_references(struct reloc_control *rc,
3538 struct btrfs_key *extent_key,
3539 struct btrfs_path *path,
3540 struct rb_root *blocks)
3541{
3542 struct btrfs_key key;
3543 struct extent_buffer *eb;
3544 struct btrfs_extent_data_ref *dref;
3545 struct btrfs_extent_inline_ref *iref;
3546 unsigned long ptr;
3547 unsigned long end;
3fd0a558 3548 u32 blocksize = btrfs_level_size(rc->extent_root, 0);
5d4f98a2
YZ
3549 int ret;
3550 int err = 0;
3551
5d4f98a2
YZ
3552 eb = path->nodes[0];
3553 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3554 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3555#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3556 if (ptr + sizeof(struct btrfs_extent_item_v0) == end)
3557 ptr = end;
3558 else
3559#endif
3560 ptr += sizeof(struct btrfs_extent_item);
3561
3562 while (ptr < end) {
3563 iref = (struct btrfs_extent_inline_ref *)ptr;
3564 key.type = btrfs_extent_inline_ref_type(eb, iref);
3565 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3566 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3567 ret = __add_tree_block(rc, key.offset, blocksize,
3568 blocks);
3569 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3570 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
3571 ret = find_data_references(rc, extent_key,
3572 eb, dref, blocks);
3573 } else {
3574 BUG();
3575 }
3576 ptr += btrfs_extent_inline_ref_size(key.type);
3577 }
3578 WARN_ON(ptr > end);
3579
3580 while (1) {
3581 cond_resched();
3582 eb = path->nodes[0];
3583 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3584 ret = btrfs_next_leaf(rc->extent_root, path);
3585 if (ret < 0) {
3586 err = ret;
3587 break;
3588 }
3589 if (ret > 0)
3590 break;
3591 eb = path->nodes[0];
3592 }
3593
3594 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3595 if (key.objectid != extent_key->objectid)
3596 break;
3597
3598#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3599 if (key.type == BTRFS_SHARED_DATA_REF_KEY ||
3600 key.type == BTRFS_EXTENT_REF_V0_KEY) {
3601#else
3602 BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
3603 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3604#endif
3605 ret = __add_tree_block(rc, key.offset, blocksize,
3606 blocks);
3607 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3608 dref = btrfs_item_ptr(eb, path->slots[0],
3609 struct btrfs_extent_data_ref);
3610 ret = find_data_references(rc, extent_key,
3611 eb, dref, blocks);
3612 } else {
3613 ret = 0;
3614 }
3615 if (ret) {
3616 err = ret;
3617 break;
3618 }
3619 path->slots[0]++;
3620 }
b3b4aa74 3621 btrfs_release_path(path);
5d4f98a2
YZ
3622 if (err)
3623 free_block_list(blocks);
3624 return err;
3625}
3626
3627/*
2c016dc2 3628 * helper to find next unprocessed extent
5d4f98a2
YZ
3629 */
3630static noinline_for_stack
3631int find_next_extent(struct btrfs_trans_handle *trans,
3fd0a558
YZ
3632 struct reloc_control *rc, struct btrfs_path *path,
3633 struct btrfs_key *extent_key)
5d4f98a2
YZ
3634{
3635 struct btrfs_key key;
3636 struct extent_buffer *leaf;
3637 u64 start, end, last;
3638 int ret;
3639
3640 last = rc->block_group->key.objectid + rc->block_group->key.offset;
3641 while (1) {
3642 cond_resched();
3643 if (rc->search_start >= last) {
3644 ret = 1;
3645 break;
3646 }
3647
3648 key.objectid = rc->search_start;
3649 key.type = BTRFS_EXTENT_ITEM_KEY;
3650 key.offset = 0;
3651
3652 path->search_commit_root = 1;
3653 path->skip_locking = 1;
3654 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3655 0, 0);
3656 if (ret < 0)
3657 break;
3658next:
3659 leaf = path->nodes[0];
3660 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3661 ret = btrfs_next_leaf(rc->extent_root, path);
3662 if (ret != 0)
3663 break;
3664 leaf = path->nodes[0];
3665 }
3666
3667 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3668 if (key.objectid >= last) {
3669 ret = 1;
3670 break;
3671 }
3672
3173a18f
JB
3673 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3674 key.type != BTRFS_METADATA_ITEM_KEY) {
3675 path->slots[0]++;
3676 goto next;
3677 }
3678
3679 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
5d4f98a2
YZ
3680 key.objectid + key.offset <= rc->search_start) {
3681 path->slots[0]++;
3682 goto next;
3683 }
3684
3173a18f
JB
3685 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3686 key.objectid + rc->extent_root->leafsize <=
3687 rc->search_start) {
3688 path->slots[0]++;
3689 goto next;
3690 }
3691
5d4f98a2
YZ
3692 ret = find_first_extent_bit(&rc->processed_blocks,
3693 key.objectid, &start, &end,
e6138876 3694 EXTENT_DIRTY, NULL);
5d4f98a2
YZ
3695
3696 if (ret == 0 && start <= key.objectid) {
b3b4aa74 3697 btrfs_release_path(path);
5d4f98a2
YZ
3698 rc->search_start = end + 1;
3699 } else {
3173a18f
JB
3700 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3701 rc->search_start = key.objectid + key.offset;
3702 else
3703 rc->search_start = key.objectid +
3704 rc->extent_root->leafsize;
3fd0a558 3705 memcpy(extent_key, &key, sizeof(key));
5d4f98a2
YZ
3706 return 0;
3707 }
3708 }
b3b4aa74 3709 btrfs_release_path(path);
5d4f98a2
YZ
3710 return ret;
3711}
3712
3713static void set_reloc_control(struct reloc_control *rc)
3714{
3715 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
7585717f
CM
3716
3717 mutex_lock(&fs_info->reloc_mutex);
5d4f98a2 3718 fs_info->reloc_ctl = rc;
7585717f 3719 mutex_unlock(&fs_info->reloc_mutex);
5d4f98a2
YZ
3720}
3721
3722static void unset_reloc_control(struct reloc_control *rc)
3723{
3724 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
7585717f
CM
3725
3726 mutex_lock(&fs_info->reloc_mutex);
5d4f98a2 3727 fs_info->reloc_ctl = NULL;
7585717f 3728 mutex_unlock(&fs_info->reloc_mutex);
5d4f98a2
YZ
3729}
3730
3731static int check_extent_flags(u64 flags)
3732{
3733 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3734 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3735 return 1;
3736 if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3737 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3738 return 1;
3739 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3740 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
3741 return 1;
3742 return 0;
3743}
3744
3fd0a558
YZ
3745static noinline_for_stack
3746int prepare_to_relocate(struct reloc_control *rc)
3747{
3748 struct btrfs_trans_handle *trans;
3749 int ret;
3750
66d8f3dd
MX
3751 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
3752 BTRFS_BLOCK_RSV_TEMP);
3fd0a558
YZ
3753 if (!rc->block_rsv)
3754 return -ENOMEM;
3755
3756 /*
3757 * reserve some space for creating reloc trees.
3758 * btrfs_init_reloc_root will use it when there
3759 * is no reservation in the transaction handle.
3760 */
4a92b1b8 3761 ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
08e007d2
MX
3762 rc->extent_root->nodesize * 256,
3763 BTRFS_RESERVE_FLUSH_ALL);
3fd0a558
YZ
3764 if (ret)
3765 return ret;
3766
3fd0a558
YZ
3767 memset(&rc->cluster, 0, sizeof(rc->cluster));
3768 rc->search_start = rc->block_group->key.objectid;
3769 rc->extents_found = 0;
3770 rc->nodes_relocated = 0;
3771 rc->merging_rsv_size = 0;
3fd0a558
YZ
3772
3773 rc->create_reloc_tree = 1;
3774 set_reloc_control(rc);
3775
7a7eaa40 3776 trans = btrfs_join_transaction(rc->extent_root);
28818947
LB
3777 if (IS_ERR(trans)) {
3778 unset_reloc_control(rc);
3779 /*
3780 * the extent tree is not a ref_cow tree and has no reloc_root to
3781 * clean up. Callers are responsible for freeing the above
3782 * block rsv.
3783 */
3784 return PTR_ERR(trans);
3785 }
3fd0a558
YZ
3786 btrfs_commit_transaction(trans, rc->extent_root);
3787 return 0;
3788}
76dda93c 3789
5d4f98a2
YZ
3790static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3791{
3792 struct rb_root blocks = RB_ROOT;
3793 struct btrfs_key key;
3794 struct btrfs_trans_handle *trans = NULL;
3795 struct btrfs_path *path;
3796 struct btrfs_extent_item *ei;
5d4f98a2
YZ
3797 u64 flags;
3798 u32 item_size;
3799 int ret;
3800 int err = 0;
c87f08ca 3801 int progress = 0;
5d4f98a2
YZ
3802
3803 path = btrfs_alloc_path();
3fd0a558 3804 if (!path)
5d4f98a2 3805 return -ENOMEM;
026fd317 3806 path->reada = 1;
5d4f98a2 3807
3fd0a558
YZ
3808 ret = prepare_to_relocate(rc);
3809 if (ret) {
3810 err = ret;
3811 goto out_free;
3812 }
5d4f98a2
YZ
3813
3814 while (1) {
c87f08ca 3815 progress++;
a22285a6 3816 trans = btrfs_start_transaction(rc->extent_root, 0);
0f788c58
LB
3817 if (IS_ERR(trans)) {
3818 err = PTR_ERR(trans);
3819 trans = NULL;
3820 break;
3821 }
c87f08ca 3822restart:
3fd0a558
YZ
3823 if (update_backref_cache(trans, &rc->backref_cache)) {
3824 btrfs_end_transaction(trans, rc->extent_root);
3825 continue;
3826 }
3827
3828 ret = find_next_extent(trans, rc, path, &key);
5d4f98a2
YZ
3829 if (ret < 0)
3830 err = ret;
3831 if (ret != 0)
3832 break;
3833
3834 rc->extents_found++;
3835
3836 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
3837 struct btrfs_extent_item);
3fd0a558 3838 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
5d4f98a2
YZ
3839 if (item_size >= sizeof(*ei)) {
3840 flags = btrfs_extent_flags(path->nodes[0], ei);
3841 ret = check_extent_flags(flags);
3842 BUG_ON(ret);
3843
3844 } else {
3845#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3846 u64 ref_owner;
3847 int path_change = 0;
3848
3849 BUG_ON(item_size !=
3850 sizeof(struct btrfs_extent_item_v0));
3851 ret = get_ref_objectid_v0(rc, path, &key, &ref_owner,
3852 &path_change);
3853 if (ref_owner < BTRFS_FIRST_FREE_OBJECTID)
3854 flags = BTRFS_EXTENT_FLAG_TREE_BLOCK;
3855 else
3856 flags = BTRFS_EXTENT_FLAG_DATA;
3857
3858 if (path_change) {
b3b4aa74 3859 btrfs_release_path(path);
5d4f98a2
YZ
3860
3861 path->search_commit_root = 1;
3862 path->skip_locking = 1;
3863 ret = btrfs_search_slot(NULL, rc->extent_root,
3864 &key, path, 0, 0);
3865 if (ret < 0) {
3866 err = ret;
3867 break;
3868 }
3869 BUG_ON(ret > 0);
3870 }
3871#else
3872 BUG();
3873#endif
3874 }
3875
3876 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3877 ret = add_tree_block(rc, &key, path, &blocks);
3878 } else if (rc->stage == UPDATE_DATA_PTRS &&
3fd0a558 3879 (flags & BTRFS_EXTENT_FLAG_DATA)) {
5d4f98a2
YZ
3880 ret = add_data_references(rc, &key, path, &blocks);
3881 } else {
b3b4aa74 3882 btrfs_release_path(path);
5d4f98a2
YZ
3883 ret = 0;
3884 }
3885 if (ret < 0) {
3fd0a558 3886 err = ret;
5d4f98a2
YZ
3887 break;
3888 }
3889
3890 if (!RB_EMPTY_ROOT(&blocks)) {
3891 ret = relocate_tree_blocks(trans, rc, &blocks);
3892 if (ret < 0) {
3fd0a558
YZ
3893 if (ret != -EAGAIN) {
3894 err = ret;
3895 break;
3896 }
3897 rc->extents_found--;
3898 rc->search_start = key.objectid;
3899 }
3900 }
3901
36ba022a 3902 ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
3fd0a558 3903 if (ret < 0) {
7654b724 3904 if (ret != -ENOSPC) {
5d4f98a2 3905 err = ret;
3fd0a558 3906 WARN_ON(1);
5d4f98a2
YZ
3907 break;
3908 }
3fd0a558 3909 rc->commit_transaction = 1;
5d4f98a2
YZ
3910 }
3911
3fd0a558
YZ
3912 if (rc->commit_transaction) {
3913 rc->commit_transaction = 0;
3914 ret = btrfs_commit_transaction(trans, rc->extent_root);
3915 BUG_ON(ret);
3916 } else {
3fd0a558 3917 btrfs_end_transaction_throttle(trans, rc->extent_root);
b53d3f5d 3918 btrfs_btree_balance_dirty(rc->extent_root);
3fd0a558 3919 }
5d4f98a2 3920 trans = NULL;
5d4f98a2
YZ
3921
3922 if (rc->stage == MOVE_DATA_EXTENTS &&
3923 (flags & BTRFS_EXTENT_FLAG_DATA)) {
3924 rc->found_file_extent = 1;
0257bb82 3925 ret = relocate_data_extent(rc->data_inode,
3fd0a558 3926 &key, &rc->cluster);
5d4f98a2
YZ
3927 if (ret < 0) {
3928 err = ret;
3929 break;
3930 }
3931 }
3932 }
c87f08ca
CM
3933 if (trans && progress && err == -ENOSPC) {
3934 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
3935 rc->block_group->flags);
3936 if (ret == 0) {
3937 err = 0;
3938 progress = 0;
3939 goto restart;
3940 }
3941 }
3fd0a558 3942
b3b4aa74 3943 btrfs_release_path(path);
3fd0a558
YZ
3944 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
3945 GFP_NOFS);
5d4f98a2
YZ
3946
3947 if (trans) {
3fd0a558 3948 btrfs_end_transaction_throttle(trans, rc->extent_root);
b53d3f5d 3949 btrfs_btree_balance_dirty(rc->extent_root);
5d4f98a2
YZ
3950 }
3951
0257bb82 3952 if (!err) {
3fd0a558
YZ
3953 ret = relocate_file_extent_cluster(rc->data_inode,
3954 &rc->cluster);
0257bb82
YZ
3955 if (ret < 0)
3956 err = ret;
3957 }
3958
3fd0a558
YZ
3959 rc->create_reloc_tree = 0;
3960 set_reloc_control(rc);
0257bb82 3961
3fd0a558
YZ
3962 backref_cache_cleanup(&rc->backref_cache);
3963 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
5d4f98a2 3964
3fd0a558 3965 err = prepare_to_merge(rc, err);
5d4f98a2
YZ
3966
3967 merge_reloc_roots(rc);
3968
3fd0a558 3969 rc->merge_reloc_tree = 0;
5d4f98a2 3970 unset_reloc_control(rc);
3fd0a558 3971 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
5d4f98a2
YZ
3972
3973 /* get rid of pinned extents */
7a7eaa40 3974 trans = btrfs_join_transaction(rc->extent_root);
3612b495
TI
3975 if (IS_ERR(trans))
3976 err = PTR_ERR(trans);
3977 else
3978 btrfs_commit_transaction(trans, rc->extent_root);
3fd0a558
YZ
3979out_free:
3980 btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
3981 btrfs_free_path(path);
5d4f98a2
YZ
3982 return err;
3983}
3984
3985static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
0257bb82 3986 struct btrfs_root *root, u64 objectid)
5d4f98a2
YZ
3987{
3988 struct btrfs_path *path;
3989 struct btrfs_inode_item *item;
3990 struct extent_buffer *leaf;
3991 int ret;
3992
3993 path = btrfs_alloc_path();
3994 if (!path)
3995 return -ENOMEM;
3996
3997 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3998 if (ret)
3999 goto out;
4000
4001 leaf = path->nodes[0];
4002 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4003 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
4004 btrfs_set_inode_generation(leaf, item, 1);
0257bb82 4005 btrfs_set_inode_size(leaf, item, 0);
5d4f98a2 4006 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3fd0a558
YZ
4007 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
4008 BTRFS_INODE_PREALLOC);
5d4f98a2 4009 btrfs_mark_buffer_dirty(leaf);
b3b4aa74 4010 btrfs_release_path(path);
5d4f98a2
YZ
4011out:
4012 btrfs_free_path(path);
4013 return ret;
4014}
4015
4016/*
4017 * helper to create an inode for data relocation.
4018 * the inode lives in the data relocation tree and its link count is 0
4019 */
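/*
 * note: the inode never gets a directory entry. __insert_orphan_inode()
 * creates the bare inode item and btrfs_orphan_add() registers it, so the
 * zero-link inode is reclaimed by orphan cleanup if relocation is
 * interrupted. index_cnt is reused to remember the block group's start
 * bytenr, which btrfs_reloc_clone_csums() relies on later.
 */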
3fd0a558
YZ
4020static noinline_for_stack
4021struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
4022 struct btrfs_block_group_cache *group)
5d4f98a2
YZ
4023{
4024 struct inode *inode = NULL;
4025 struct btrfs_trans_handle *trans;
4026 struct btrfs_root *root;
4027 struct btrfs_key key;
5d4f98a2
YZ
4028 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
4029 int err = 0;
4030
4031 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4032 if (IS_ERR(root))
4033 return ERR_CAST(root);
4034
a22285a6 4035 trans = btrfs_start_transaction(root, 6);
3fd0a558
YZ
4036 if (IS_ERR(trans))
4037 return ERR_CAST(trans);
5d4f98a2 4038
581bb050 4039 err = btrfs_find_free_objectid(root, &objectid);
5d4f98a2
YZ
4040 if (err)
4041 goto out;
4042
0257bb82 4043 err = __insert_orphan_inode(trans, root, objectid);
5d4f98a2
YZ
4044 BUG_ON(err);
4045
4046 key.objectid = objectid;
4047 key.type = BTRFS_INODE_ITEM_KEY;
4048 key.offset = 0;
73f73415 4049 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
5d4f98a2
YZ
4050 BUG_ON(IS_ERR(inode) || is_bad_inode(inode));
4051 BTRFS_I(inode)->index_cnt = group->key.objectid;
4052
4053 err = btrfs_orphan_add(trans, inode);
4054out:
5d4f98a2 4055 btrfs_end_transaction(trans, root);
b53d3f5d 4056 btrfs_btree_balance_dirty(root);
5d4f98a2
YZ
4057 if (err) {
4058 if (inode)
4059 iput(inode);
4060 inode = ERR_PTR(err);
4061 }
4062 return inode;
4063}
4064
3fd0a558
YZ
4065static struct reloc_control *alloc_reloc_control(void)
4066{
4067 struct reloc_control *rc;
4068
4069 rc = kzalloc(sizeof(*rc), GFP_NOFS);
4070 if (!rc)
4071 return NULL;
4072
4073 INIT_LIST_HEAD(&rc->reloc_roots);
4074 backref_cache_init(&rc->backref_cache);
4075 mapping_tree_init(&rc->reloc_root_tree);
f993c883 4076 extent_io_tree_init(&rc->processed_blocks, NULL);
3fd0a558
YZ
4077 return rc;
4078}
4079
5d4f98a2
YZ
4080/*
4081 * function to relocate all extents in a block group.
4082 */
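/*
 * rough flow: mark the block group read-only, drop its free space cache
 * inode, create the data relocation inode, then call relocate_block_group()
 * in a loop under the cleaner mutex. once the MOVE_DATA_EXTENTS stage has
 * found file extents, ordered extents on the relocation inode are waited on,
 * its page cache is invalidated, and a second UPDATE_DATA_PTRS pass rewrites
 * the file extent pointers. the loop ends when no more extents are found.
 */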
4083int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
4084{
4085 struct btrfs_fs_info *fs_info = extent_root->fs_info;
4086 struct reloc_control *rc;
0af3d00b
JB
4087 struct inode *inode;
4088 struct btrfs_path *path;
5d4f98a2 4089 int ret;
f0486c68 4090 int rw = 0;
5d4f98a2
YZ
4091 int err = 0;
4092
3fd0a558 4093 rc = alloc_reloc_control();
5d4f98a2
YZ
4094 if (!rc)
4095 return -ENOMEM;
4096
f0486c68 4097 rc->extent_root = extent_root;
3fd0a558 4098
5d4f98a2
YZ
4099 rc->block_group = btrfs_lookup_block_group(fs_info, group_start);
4100 BUG_ON(!rc->block_group);
4101
f0486c68
YZ
4102 if (!rc->block_group->ro) {
4103 ret = btrfs_set_block_group_ro(extent_root, rc->block_group);
4104 if (ret) {
4105 err = ret;
4106 goto out;
4107 }
4108 rw = 1;
4109 }
4110
0af3d00b
JB
4111 path = btrfs_alloc_path();
4112 if (!path) {
4113 err = -ENOMEM;
4114 goto out;
4115 }
4116
4117 inode = lookup_free_space_inode(fs_info->tree_root, rc->block_group,
4118 path);
4119 btrfs_free_path(path);
4120
4121 if (!IS_ERR(inode))
4122 ret = delete_block_group_cache(fs_info, inode, 0);
4123 else
4124 ret = PTR_ERR(inode);
4125
4126 if (ret && ret != -ENOENT) {
4127 err = ret;
4128 goto out;
4129 }
4130
5d4f98a2
YZ
4131 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4132 if (IS_ERR(rc->data_inode)) {
4133 err = PTR_ERR(rc->data_inode);
4134 rc->data_inode = NULL;
4135 goto out;
4136 }
4137
4138 printk(KERN_INFO "btrfs: relocating block group %llu flags %llu\n",
4139 (unsigned long long)rc->block_group->key.objectid,
4140 (unsigned long long)rc->block_group->flags);
4141
8ccf6f19
MX
4142 ret = btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
4143 if (ret < 0) {
4144 err = ret;
4145 goto out;
4146 }
6bbe3a9c 4147 btrfs_wait_ordered_extents(fs_info->tree_root, 0);
5d4f98a2
YZ
4148
4149 while (1) {
76dda93c 4150 mutex_lock(&fs_info->cleaner_mutex);
5d4f98a2 4151 ret = relocate_block_group(rc);
76dda93c 4152 mutex_unlock(&fs_info->cleaner_mutex);
5d4f98a2
YZ
4153 if (ret < 0) {
4154 err = ret;
3fd0a558 4155 goto out;
5d4f98a2
YZ
4156 }
4157
4158 if (rc->extents_found == 0)
4159 break;
4160
4161 printk(KERN_INFO "btrfs: found %llu extents\n",
4162 (unsigned long long)rc->extents_found);
4163
4164 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4165 btrfs_wait_ordered_range(rc->data_inode, 0, (u64)-1);
4166 invalidate_mapping_pages(rc->data_inode->i_mapping,
4167 0, -1);
4168 rc->stage = UPDATE_DATA_PTRS;
5d4f98a2
YZ
4169 }
4170 }
4171
0257bb82
YZ
4172 filemap_write_and_wait_range(fs_info->btree_inode->i_mapping,
4173 rc->block_group->key.objectid,
4174 rc->block_group->key.objectid +
4175 rc->block_group->key.offset - 1);
5d4f98a2
YZ
4176
4177 WARN_ON(rc->block_group->pinned > 0);
4178 WARN_ON(rc->block_group->reserved > 0);
4179 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
4180out:
f0486c68
YZ
4181 if (err && rw)
4182 btrfs_set_block_group_rw(extent_root, rc->block_group);
5d4f98a2 4183 iput(rc->data_inode);
5d4f98a2
YZ
4184 btrfs_put_block_group(rc->block_group);
4185 kfree(rc);
4186 return err;
4187}
4188
76dda93c
YZ
4189static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4190{
4191 struct btrfs_trans_handle *trans;
79787eaa 4192 int ret, err;
76dda93c 4193
a22285a6 4194 trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
79787eaa
JM
4195 if (IS_ERR(trans))
4196 return PTR_ERR(trans);
76dda93c
YZ
4197
4198 memset(&root->root_item.drop_progress, 0,
4199 sizeof(root->root_item.drop_progress));
4200 root->root_item.drop_level = 0;
4201 btrfs_set_root_refs(&root->root_item, 0);
4202 ret = btrfs_update_root(trans, root->fs_info->tree_root,
4203 &root->root_key, &root->root_item);
76dda93c 4204
79787eaa
JM
4205 err = btrfs_end_transaction(trans, root->fs_info->tree_root);
4206 if (err)
4207 return err;
4208 return ret;
76dda93c
YZ
4209}
4210
5d4f98a2
YZ
4211/*
4212 * recover relocation interrupted by a system crash.
4213 *
4214 * this function resumes merging reloc trees with their corresponding
4215 * fs trees. this is important for preserving the sharing of tree blocks
4216 */
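/*
 * the tree root is scanned for BTRFS_TREE_RELOC_OBJECTID root items from the
 * highest key.offset downwards. reloc roots whose fs root no longer exists
 * are marked as garbage (root refs set to 0) so they are dropped later; the
 * remaining reloc roots are re-attached to their fs roots and merged via
 * merge_reloc_roots(). finally the orphan inode in the data relocation tree
 * is cleaned up.
 */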
4217int btrfs_recover_relocation(struct btrfs_root *root)
4218{
4219 LIST_HEAD(reloc_roots);
4220 struct btrfs_key key;
4221 struct btrfs_root *fs_root;
4222 struct btrfs_root *reloc_root;
4223 struct btrfs_path *path;
4224 struct extent_buffer *leaf;
4225 struct reloc_control *rc = NULL;
4226 struct btrfs_trans_handle *trans;
4227 int ret;
4228 int err = 0;
4229
4230 path = btrfs_alloc_path();
4231 if (!path)
4232 return -ENOMEM;
026fd317 4233 path->reada = -1;
5d4f98a2
YZ
4234
4235 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4236 key.type = BTRFS_ROOT_ITEM_KEY;
4237 key.offset = (u64)-1;
4238
4239 while (1) {
4240 ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key,
4241 path, 0, 0);
4242 if (ret < 0) {
4243 err = ret;
4244 goto out;
4245 }
4246 if (ret > 0) {
4247 if (path->slots[0] == 0)
4248 break;
4249 path->slots[0]--;
4250 }
4251 leaf = path->nodes[0];
4252 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
b3b4aa74 4253 btrfs_release_path(path);
5d4f98a2
YZ
4254
4255 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4256 key.type != BTRFS_ROOT_ITEM_KEY)
4257 break;
4258
4259 reloc_root = btrfs_read_fs_root_no_radix(root, &key);
4260 if (IS_ERR(reloc_root)) {
4261 err = PTR_ERR(reloc_root);
4262 goto out;
4263 }
4264
4265 list_add(&reloc_root->root_list, &reloc_roots);
4266
4267 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4268 fs_root = read_fs_root(root->fs_info,
4269 reloc_root->root_key.offset);
4270 if (IS_ERR(fs_root)) {
76dda93c
YZ
4271 ret = PTR_ERR(fs_root);
4272 if (ret != -ENOENT) {
4273 err = ret;
4274 goto out;
4275 }
79787eaa
JM
4276 ret = mark_garbage_root(reloc_root);
4277 if (ret < 0) {
4278 err = ret;
4279 goto out;
4280 }
5d4f98a2
YZ
4281 }
4282 }
4283
4284 if (key.offset == 0)
4285 break;
4286
4287 key.offset--;
4288 }
b3b4aa74 4289 btrfs_release_path(path);
5d4f98a2
YZ
4290
4291 if (list_empty(&reloc_roots))
4292 goto out;
4293
3fd0a558 4294 rc = alloc_reloc_control();
5d4f98a2
YZ
4295 if (!rc) {
4296 err = -ENOMEM;
4297 goto out;
4298 }
4299
5d4f98a2
YZ
4300 rc->extent_root = root->fs_info->extent_root;
4301
4302 set_reloc_control(rc);
4303
7a7eaa40 4304 trans = btrfs_join_transaction(rc->extent_root);
3612b495
TI
4305 if (IS_ERR(trans)) {
4306 unset_reloc_control(rc);
4307 err = PTR_ERR(trans);
4308 goto out_free;
4309 }
3fd0a558
YZ
4310
4311 rc->merge_reloc_tree = 1;
4312
5d4f98a2
YZ
4313 while (!list_empty(&reloc_roots)) {
4314 reloc_root = list_entry(reloc_roots.next,
4315 struct btrfs_root, root_list);
4316 list_del(&reloc_root->root_list);
4317
4318 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4319 list_add_tail(&reloc_root->root_list,
4320 &rc->reloc_roots);
4321 continue;
4322 }
4323
4324 fs_root = read_fs_root(root->fs_info,
4325 reloc_root->root_key.offset);
79787eaa
JM
4326 if (IS_ERR(fs_root)) {
4327 err = PTR_ERR(fs_root);
4328 goto out_free;
4329 }
5d4f98a2 4330
ffd7b339 4331 err = __add_reloc_root(reloc_root);
79787eaa 4332 BUG_ON(err < 0); /* -ENOMEM or logic error */
5d4f98a2
YZ
4333 fs_root->reloc_root = reloc_root;
4334 }
4335
79787eaa
JM
4336 err = btrfs_commit_transaction(trans, rc->extent_root);
4337 if (err)
4338 goto out_free;
5d4f98a2
YZ
4339
4340 merge_reloc_roots(rc);
4341
4342 unset_reloc_control(rc);
4343
7a7eaa40 4344 trans = btrfs_join_transaction(rc->extent_root);
3612b495
TI
4345 if (IS_ERR(trans))
4346 err = PTR_ERR(trans);
4347 else
79787eaa 4348 err = btrfs_commit_transaction(trans, rc->extent_root);
3612b495 4349out_free:
3fd0a558 4350 kfree(rc);
3612b495 4351out:
aca1bba6
LB
4352 if (!list_empty(&reloc_roots))
4353 free_reloc_roots(&reloc_roots);
4354
5d4f98a2
YZ
4355 btrfs_free_path(path);
4356
4357 if (err == 0) {
4358 /* cleanup orphan inode in data relocation tree */
4359 fs_root = read_fs_root(root->fs_info,
4360 BTRFS_DATA_RELOC_TREE_OBJECTID);
4361 if (IS_ERR(fs_root))
4362 err = PTR_ERR(fs_root);
d7ce5843 4363 else
66b4ffd1 4364 err = btrfs_orphan_cleanup(fs_root);
5d4f98a2
YZ
4365 }
4366 return err;
4367}
4368
4369/*
4370 * helper to add ordered checksums for data relocation.
4371 *
4372 * cloning the checksums properly handles nodatasum extents.
4373 * it also saves the CPU time of re-calculating the checksums.
4374 */
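/*
 * the data relocation inode maps file offsets 1:1 onto the block group being
 * relocated (index_cnt holds the group's start bytenr), so the checksums can
 * be looked up at the old disk bytenr and rebased by
 * (ordered->start - disk_bytenr) to describe the same data at its new
 * location.
 */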
4375int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4376{
4377 struct btrfs_ordered_sum *sums;
4378 struct btrfs_sector_sum *sector_sum;
4379 struct btrfs_ordered_extent *ordered;
4380 struct btrfs_root *root = BTRFS_I(inode)->root;
4381 size_t offset;
4382 int ret;
4383 u64 disk_bytenr;
4384 LIST_HEAD(list);
4385
4386 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4387 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
4388
4389 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4390 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
a2de733c 4391 disk_bytenr + len - 1, &list, 0);
79787eaa
JM
4392 if (ret)
4393 goto out;
5d4f98a2
YZ
4394
4395 while (!list_empty(&list)) {
4396 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4397 list_del_init(&sums->list);
4398
4399 sector_sum = sums->sums;
4400 sums->bytenr = ordered->start;
4401
4402 offset = 0;
4403 while (offset < sums->len) {
4404 sector_sum->bytenr += ordered->start - disk_bytenr;
4405 sector_sum++;
4406 offset += root->sectorsize;
4407 }
4408
4409 btrfs_add_ordered_sum(inode, ordered, sums);
4410 }
79787eaa 4411out:
5d4f98a2 4412 btrfs_put_ordered_extent(ordered);
411fc6bc 4413 return ret;
5d4f98a2 4414}
3fd0a558
YZ
4415
4416void btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4417 struct btrfs_root *root, struct extent_buffer *buf,
4418 struct extent_buffer *cow)
4419{
4420 struct reloc_control *rc;
4421 struct backref_node *node;
4422 int first_cow = 0;
4423 int level;
4424 int ret;
4425
4426 rc = root->fs_info->reloc_ctl;
4427 if (!rc)
4428 return;
4429
4430 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4431 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4432
4433 level = btrfs_header_level(buf);
4434 if (btrfs_header_generation(buf) <=
4435 btrfs_root_last_snapshot(&root->root_item))
4436 first_cow = 1;
4437
4438 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4439 rc->create_reloc_tree) {
4440 WARN_ON(!first_cow && level == 0);
4441
4442 node = rc->backref_cache.path[level];
4443 BUG_ON(node->bytenr != buf->start &&
4444 node->new_bytenr != buf->start);
4445
4446 drop_node_buffer(node);
4447 extent_buffer_get(cow);
4448 node->eb = cow;
4449 node->new_bytenr = cow->start;
4450
4451 if (!node->pending) {
4452 list_move_tail(&node->list,
4453 &rc->backref_cache.pending[level]);
4454 node->pending = 1;
4455 }
4456
4457 if (first_cow)
4458 __mark_block_processed(rc, node);
4459
4460 if (first_cow && level > 0)
4461 rc->nodes_relocated += buf->len;
4462 }
4463
4464 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS) {
4465 ret = replace_file_extents(trans, rc, root, cow);
4466 BUG_ON(ret);
4467 }
4468}
4469
4470/*
4471 * called before creating a snapshot. it calculates the metadata
4472 * reservation required for relocating tree blocks in the snapshot
4473 */
4474void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
4475 struct btrfs_pending_snapshot *pending,
4476 u64 *bytes_to_reserve)
4477{
4478 struct btrfs_root *root;
4479 struct reloc_control *rc;
4480
4481 root = pending->root;
4482 if (!root->reloc_root)
4483 return;
4484
4485 rc = root->fs_info->reloc_ctl;
4486 if (!rc->merge_reloc_tree)
4487 return;
4488
4489 root = root->reloc_root;
4490 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4491 /*
4492 * relocation is in the stage of merging trees. the space
4493 * used by merging a reloc tree is twice the size of the
4494 * relocated tree nodes in the worst case: half for cowing
4495 * the reloc tree, half for cowing the fs tree. the space
4496 * used by cowing the reloc tree will be freed after the
4497 * tree is dropped. if we create a snapshot, cowing the fs
4498 * tree may use more space than it frees, so we need to
4499 * reserve extra space.
4500 */
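	/*
	 * illustrative numbers: with nodes_relocated == 1MB, merging may
	 * dirty up to 2MB (1MB for the reloc tree COW plus 1MB for the fs
	 * tree COW). the snapshot keeps references to fs tree blocks that
	 * the merge would otherwise free, so up to another 1MB may be
	 * needed, which is the nodes_relocated added to *bytes_to_reserve
	 * here.
	 */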
4501 *bytes_to_reserve += rc->nodes_relocated;
4502}
4503
4504/*
4505 * called after the snapshot is created. migrate the block reservation
4506 * and create a reloc root for the newly created snapshot
4507 */
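/*
 * the migrated reservation is the nodes_relocated worth of space that
 * btrfs_reloc_pre_snapshot() asked the caller to reserve. the new snapshot
 * gets its own reloc root, keyed by the snapshot's objectid, and while reloc
 * trees are still being created the matching backref nodes are cloned for it.
 */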
49b25e05 4508int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
3fd0a558
YZ
4509 struct btrfs_pending_snapshot *pending)
4510{
4511 struct btrfs_root *root = pending->root;
4512 struct btrfs_root *reloc_root;
4513 struct btrfs_root *new_root;
4514 struct reloc_control *rc;
4515 int ret;
4516
4517 if (!root->reloc_root)
49b25e05 4518 return 0;
3fd0a558
YZ
4519
4520 rc = root->fs_info->reloc_ctl;
4521 rc->merging_rsv_size += rc->nodes_relocated;
4522
4523 if (rc->merge_reloc_tree) {
4524 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4525 rc->block_rsv,
4526 rc->nodes_relocated);
49b25e05
JM
4527 if (ret)
4528 return ret;
3fd0a558
YZ
4529 }
4530
4531 new_root = pending->snap;
4532 reloc_root = create_reloc_root(trans, root->reloc_root,
4533 new_root->root_key.objectid);
49b25e05
JM
4534 if (IS_ERR(reloc_root))
4535 return PTR_ERR(reloc_root);
3fd0a558 4536
ffd7b339
JM
4537 ret = __add_reloc_root(reloc_root);
4538 BUG_ON(ret < 0);
3fd0a558
YZ
4539 new_root->reloc_root = reloc_root;
4540
49b25e05 4541 if (rc->create_reloc_tree)
3fd0a558 4542 ret = clone_backref_node(trans, rc, root, reloc_root);
49b25e05 4543 return ret;
3fd0a558 4544}