/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_reserved_extents(struct btrfs_root *root,
				   u64 bytenr, u64 num, int reserve);
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);

static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

/*
 * We always set EXTENT_LOCKED for the super mirror extents so we don't
 * overwrite them, so those bits need to be unset.  Also, if we are
 * unmounting with pinned extents still sitting there because a block
 * group was still caching, we need to clear those now, since we are done.
 */
void btrfs_free_pinned_extents(struct btrfs_fs_info *info)
{
	u64 start, end, last = 0;
	int ret;

	while (1) {
		ret = find_first_extent_bit(&info->pinned_extents, last,
					    &start, &end,
					    EXTENT_LOCKED|EXTENT_DIRTY);
		if (ret)
			break;

		clear_extent_bits(&info->pinned_extents, start, end,
				  EXTENT_LOCKED|EXTENT_DIRTY, GFP_NOFS);
		last = end + 1;
	}
}

static int remove_sb_from_cache(struct btrfs_root *root,
				struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);
		while (nr--) {
			try_lock_extent(&fs_info->pinned_extents,
					logical[nr],
					logical[nr] + stripe_len - 1, GFP_NOFS);
		}
		kfree(logical);
	}

	return 0;
}

/*
 * this is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents for any extents that
 * can't be used yet, since their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(&info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY|EXTENT_LOCKED);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	u64 last = 0;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int slot;
	u64 total_found = 0;

	BUG_ON(!fs_info);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	atomic_inc(&block_group->space_info->caching_threads);
	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_root->commit_root_sem);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		smp_mb();
		if (block_group->fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(fs_info->extent_root, path);
			if (ret < 0)
				goto err;
			else if (ret)
				break;

			if (need_resched()) {
				btrfs_release_path(fs_info->extent_root, path);
				up_read(&fs_info->extent_root->commit_root_sem);
				cond_resched();
				goto again;
			}

			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid < block_group->key.objectid)
			goto next;

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;
		}

		if (total_found > (1024 * 1024 * 2)) {
			total_found = 0;
			wake_up(&block_group->caching_q);
		}
next:
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);

	spin_lock(&block_group->lock);
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_root->commit_root_sem);
	atomic_dec(&block_group->space_info->caching_threads);
	wake_up(&block_group->caching_q);

	return 0;
}

static int cache_block_group(struct btrfs_block_group_cache *cache)
{
	struct task_struct *tsk;
	int ret = 0;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		return ret;
	}
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually, full back refs are generic and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
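
/*
 * A worked example of the rules above (illustrative only; N, L, S, F and
 * P are made-up values, but the key layout matches the lookup and insert
 * helpers below):
 *
 * A data extent at bytenr N of length L, referenced once from file F in
 * subvolume S (implicit) and once through a shared leaf at bytenr P
 * (full), is described by:
 *
 *     (N, BTRFS_EXTENT_ITEM_KEY, L)   refs == 2, plus the back refs
 *     (N, BTRFS_EXTENT_DATA_REF_KEY, hash(S, F, offset))   count == 1
 *     (N, BTRFS_SHARED_DATA_REF_KEY, P)                    count == 1
 *
 * A tree block at bytenr N owned by tree T carries either
 *
 *     (N, BTRFS_TREE_BLOCK_REF_KEY, T)    for the implicit case, or
 *     (N, BTRFS_SHARED_BLOCK_REF_KEY, P)  for the full case.
 *
 * Each back ref may also be stored inline in the extent item instead of
 * as a separate item; see lookup_inline_extent_backref() below.
 */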

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

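	/*
	 * The 31-bit shift below (rather than 32) looks accidental, but
	 * the resulting hash is stored on disk as a back ref key offset,
	 * so presumably it cannot be changed without breaking the extent
	 * tree format.
	 */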
	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for an inline back ref. if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
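/*
 * For the typical handling of all three results, see
 * insert_inline_extent_backref() below, which updates the ref in place
 * on 0 and creates it on -ENOENT, and __btrfs_inc_extent_ref(), which
 * falls back to a separate back ref item on -EAGAIN.
 */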
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

#ifdef BIO_RW_DISCARD
static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
}
#endif

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
#ifdef BIO_RW_DISCARD
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
#else
	return 0;
#endif
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;
	u64 flags = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_data_ref(node);
	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		if (extent_op) {
			BUG_ON(extent_op->update_key);
			flags |= extent_op->flags_to_set;
		}
		ret = alloc_reserved_file_extent(trans, root,
						 parent, ref_root, flags,
						 ref->objectid, ref->offset,
						 &ins, node->ref_mod);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent,
					     ref_root, ref->objectid,
					     ref->offset, node->ref_mod,
					     extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent,
					  ref_root, ref->objectid,
					  ref->offset, node->ref_mod,
					  extent_op);
	} else {
		BUG();
	}
	return ret;
}

static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei)
{
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
	}

	if (extent_op->update_key) {
		struct btrfs_tree_block_info *bi;
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
		bi = (struct btrfs_tree_block_info *)(ei + 1);
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
	}
}

static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_delayed_ref_node *node,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	u32 item_size;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = node->bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = node->num_bytes;

	path->reada = 1;
	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
				path, 0, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	if (ret > 0) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
					     path, (u64)-1, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	__run_delayed_extent_op(extent_op, leaf, ei);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return err;
}

static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node,
				struct btrfs_delayed_extent_op *extent_op,
				int insert_reserved)
{
	int ret = 0;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_key ins;
	u64 parent = 0;
	u64 ref_root = 0;

	ins.objectid = node->bytenr;
	ins.offset = node->num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ref = btrfs_delayed_node_to_tree_ref(node);
	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		parent = ref->parent;
	else
		ref_root = ref->root;

	BUG_ON(node->ref_mod != 1);
	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
		BUG_ON(!extent_op || !extent_op->update_flags ||
		       !extent_op->update_key);
		ret = alloc_reserved_tree_block(trans, root,
						parent, ref_root,
						extent_op->flags_to_set,
						&extent_op->key,
						ref->level, &ins);
		update_reserved_extents(root, ins.objectid, ins.offset, 0);
	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
		ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
					     node->num_bytes, parent, ref_root,
					     ref->level, 0, 1, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, root, node->bytenr,
					  node->num_bytes, parent, ref_root,
					  ref->level, 0, 1, extent_op);
	} else {
		BUG();
	}
	return ret;
}

/* helper function to actually process a single delayed ref entry */
static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node,
			       struct btrfs_delayed_extent_op *extent_op,
			       int insert_reserved)
{
	int ret;
	if (btrfs_delayed_ref_is_head(node)) {
		struct btrfs_delayed_ref_head *head;
		/*
		 * we've hit the end of the chain and we were supposed
		 * to insert this extent into the tree.  But, it got
		 * deleted before we ever needed to insert it, so all
		 * we have to do is clean up the accounting
		 */
		BUG_ON(extent_op);
		head = btrfs_delayed_node_to_head(node);
		if (insert_reserved) {
			if (head->is_data) {
				ret = btrfs_del_csums(trans, root,
						      node->bytenr,
						      node->num_bytes);
				BUG_ON(ret);
			}
			btrfs_update_pinned_extents(root, node->bytenr,
						    node->num_bytes, 1);
			update_reserved_extents(root, node->bytenr,
						node->num_bytes, 0);
		}
		mutex_unlock(&head->mutex);
		return 0;
	}

	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = run_delayed_tree_ref(trans, root, node, extent_op,
					   insert_reserved);
	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
		 node->type == BTRFS_SHARED_DATA_REF_KEY)
		ret = run_delayed_data_ref(trans, root, node, extent_op,
					   insert_reserved);
	else
		BUG();
	return ret;
}

static noinline struct btrfs_delayed_ref_node *
select_delayed_ref(struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	int action = BTRFS_ADD_DELAYED_REF;
again:
	/*
	 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
	 * this prevents the ref count from going down to zero when
	 * there are still pending delayed refs.
	 */
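	/*
	 * For example: if a block gained one reference and dropped
	 * another in the same transaction, handling the add first keeps
	 * the ref count from touching zero while the drop is still
	 * queued behind it.
	 */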
	node = rb_prev(&head->node.rb_node);
	while (1) {
		if (!node)
			break;
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;
		if (ref->action == action)
			return ref;
		node = rb_prev(node);
	}
	if (action == BTRFS_ADD_DELAYED_REF) {
		action = BTRFS_DROP_DELAYED_REF;
		goto again;
	}
	return NULL;
}

static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct list_head *cluster)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *locked_ref = NULL;
	struct btrfs_delayed_extent_op *extent_op;
	int ret;
	int count = 0;
	int must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		if (!locked_ref) {
			/* pick a new head ref from the cluster list */
			if (list_empty(cluster))
				break;

			locked_ref = list_entry(cluster->next,
				     struct btrfs_delayed_ref_head, cluster);

			/* grab the lock that says we are going to process
			 * all the refs for this head */
			ret = btrfs_delayed_ref_lock(trans, locked_ref);

			/*
			 * we may have dropped the spin lock to get the head
			 * mutex lock, and that might have given someone else
			 * time to free the head.  If that's true, it has been
			 * removed from our list and we can move on.
			 */
			if (ret == -EAGAIN) {
				locked_ref = NULL;
				count++;
				continue;
			}
		}

		/*
		 * record the must insert reserved flag before we
		 * drop the spin lock.
		 */
		must_insert_reserved = locked_ref->must_insert_reserved;
		locked_ref->must_insert_reserved = 0;

		extent_op = locked_ref->extent_op;
		locked_ref->extent_op = NULL;

		/*
		 * locked_ref is the head node, so we have to go one
		 * node back for any delayed ref updates
		 */
		ref = select_delayed_ref(locked_ref);
		if (!ref) {
1920 /* all delayed refs have been processed, go ahead
1921 * and send the head node to run_one_delayed_ref,
1922 * so that any accounting fixes can happen
1923 */
1924 ref = &locked_ref->node;
1925
1926 if (extent_op && must_insert_reserved) {
1927 kfree(extent_op);
1928 extent_op = NULL;
1929 }
1930
1931 if (extent_op) {
1932 spin_unlock(&delayed_refs->lock);
1933
1934 ret = run_delayed_extent_op(trans, root,
1935 ref, extent_op);
1936 BUG_ON(ret);
1937 kfree(extent_op);
1938
1939 cond_resched();
1940 spin_lock(&delayed_refs->lock);
1941 continue;
1942 }
1943
1944 list_del_init(&locked_ref->cluster);
1945 locked_ref = NULL;
1946 }
1947
1948 ref->in_tree = 0;
1949 rb_erase(&ref->rb_node, &delayed_refs->root);
1950 delayed_refs->num_entries--;
1951
1952 spin_unlock(&delayed_refs->lock);
1953
1954 ret = run_one_delayed_ref(trans, root, ref, extent_op,
1955 must_insert_reserved);
1956 BUG_ON(ret);
1957
1958 btrfs_put_delayed_ref(ref);
1959 kfree(extent_op);
1960 count++;
1961
1962 cond_resched();
1963 spin_lock(&delayed_refs->lock);
1964 }
1965 return count;
1966 }
1967
1968 /*
1969 * this starts processing the delayed reference count updates and
1970 * extent insertions we have queued up so far. count can be
1971 * 0, which means to process everything in the tree at the start
1972 * of the run (but not newly added entries), or it can be some target
1973 * number you'd like to process.
1974 */
1975 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1976 struct btrfs_root *root, unsigned long count)
1977 {
1978 struct rb_node *node;
1979 struct btrfs_delayed_ref_root *delayed_refs;
1980 struct btrfs_delayed_ref_node *ref;
1981 struct list_head cluster;
1982 int ret;
1983 int run_all = count == (unsigned long)-1;
1984 int run_most = 0;
1985
1986 if (root == root->fs_info->extent_root)
1987 root = root->fs_info->tree_root;
1988
1989 delayed_refs = &trans->transaction->delayed_refs;
1990 INIT_LIST_HEAD(&cluster);
1991 again:
1992 spin_lock(&delayed_refs->lock);
1993 if (count == 0) {
1994 count = delayed_refs->num_entries * 2;
1995 run_most = 1;
1996 }
1997 while (1) {
1998 if (!(run_all || run_most) &&
1999 delayed_refs->num_heads_ready < 64)
2000 break;
2001
2002 /*
2003 * go find something we can process in the rbtree. We start at
2004 * the beginning of the tree, and then build a cluster
2005 * of refs to process starting at the first one we are able to
2006 * lock
2007 */
2008 ret = btrfs_find_ref_cluster(trans, &cluster,
2009 delayed_refs->run_delayed_start);
2010 if (ret)
2011 break;
2012
2013 ret = run_clustered_refs(trans, root, &cluster);
2014 BUG_ON(ret < 0);
2015
2016 count -= min_t(unsigned long, ret, count);
2017
2018 if (count == 0)
2019 break;
2020 }
2021
2022 if (run_all) {
2023 node = rb_first(&delayed_refs->root);
2024 if (!node)
2025 goto out;
2026 count = (unsigned long)-1;
2027
2028 while (node) {
2029 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2030 rb_node);
2031 if (btrfs_delayed_ref_is_head(ref)) {
2032 struct btrfs_delayed_ref_head *head;
2033
2034 head = btrfs_delayed_node_to_head(ref);
2035 atomic_inc(&ref->refs);
2036
2037 spin_unlock(&delayed_refs->lock);
2038 mutex_lock(&head->mutex);
2039 mutex_unlock(&head->mutex);
2040
2041 btrfs_put_delayed_ref(ref);
2042 cond_resched();
2043 goto again;
2044 }
2045 node = rb_next(node);
2046 }
2047 spin_unlock(&delayed_refs->lock);
2048 schedule_timeout(1);
2049 goto again;
2050 }
2051 out:
2052 spin_unlock(&delayed_refs->lock);
2053 return 0;
2054 }
2055
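/*
 * record 'flags' to be set on the extent item at bytenr the next time
 * the delayed refs are run, instead of touching the extent tree here.
 */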
2056 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2057 struct btrfs_root *root,
2058 u64 bytenr, u64 num_bytes, u64 flags,
2059 int is_data)
2060 {
2061 struct btrfs_delayed_extent_op *extent_op;
2062 int ret;
2063
2064 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2065 if (!extent_op)
2066 return -ENOMEM;
2067
2068 extent_op->flags_to_set = flags;
2069 extent_op->update_flags = 1;
2070 extent_op->update_key = 0;
2071 extent_op->is_data = is_data ? 1 : 0;
2072
2073 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2074 if (ret)
2075 kfree(extent_op);
2076 return ret;
2077 }
2078
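/*
 * the delayed-ref half of the cross reference check: returns 0 if the
 * only pending ref for bytenr is our own data ref for (root, objectid,
 * offset), 1 if some other ref is pending, -ENOENT if nothing is
 * queued for this extent, and -EAGAIN if the head mutex was contended
 * and the caller should retry.
 */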
2079 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2080 struct btrfs_root *root,
2081 struct btrfs_path *path,
2082 u64 objectid, u64 offset, u64 bytenr)
2083 {
2084 struct btrfs_delayed_ref_head *head;
2085 struct btrfs_delayed_ref_node *ref;
2086 struct btrfs_delayed_data_ref *data_ref;
2087 struct btrfs_delayed_ref_root *delayed_refs;
2088 struct rb_node *node;
2089 int ret = 0;
2090
2091 ret = -ENOENT;
2092 delayed_refs = &trans->transaction->delayed_refs;
2093 spin_lock(&delayed_refs->lock);
2094 head = btrfs_find_delayed_ref_head(trans, bytenr);
2095 if (!head)
2096 goto out;
2097
2098 if (!mutex_trylock(&head->mutex)) {
2099 atomic_inc(&head->node.refs);
2100 spin_unlock(&delayed_refs->lock);
2101
2102 btrfs_release_path(root->fs_info->extent_root, path);
2103
2104 mutex_lock(&head->mutex);
2105 mutex_unlock(&head->mutex);
2106 btrfs_put_delayed_ref(&head->node);
2107 return -EAGAIN;
2108 }
2109
2110 node = rb_prev(&head->node.rb_node);
2111 if (!node)
2112 goto out_unlock;
2113
2114 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2115
2116 if (ref->bytenr != bytenr)
2117 goto out_unlock;
2118
2119 ret = 1;
2120 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2121 goto out_unlock;
2122
2123 data_ref = btrfs_delayed_node_to_data_ref(ref);
2124
2125 node = rb_prev(node);
2126 if (node) {
2127 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2128 if (ref->bytenr == bytenr)
2129 goto out_unlock;
2130 }
2131
2132 if (data_ref->root != root->root_key.objectid ||
2133 data_ref->objectid != objectid || data_ref->offset != offset)
2134 goto out_unlock;
2135
2136 ret = 0;
2137 out_unlock:
2138 mutex_unlock(&head->mutex);
2139 out:
2140 spin_unlock(&delayed_refs->lock);
2141 return ret;
2142 }
2143
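/*
 * the committed-tree half of the cross reference check: returns 0 only
 * if the extent item for bytenr holds a single inline data ref that
 * belongs to (root, objectid, offset) and is newer than the root's
 * last snapshot; otherwise 1 (possibly shared) or -ENOENT.
 */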
2144 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2145 struct btrfs_root *root,
2146 struct btrfs_path *path,
2147 u64 objectid, u64 offset, u64 bytenr)
2148 {
2149 struct btrfs_root *extent_root = root->fs_info->extent_root;
2150 struct extent_buffer *leaf;
2151 struct btrfs_extent_data_ref *ref;
2152 struct btrfs_extent_inline_ref *iref;
2153 struct btrfs_extent_item *ei;
2154 struct btrfs_key key;
2155 u32 item_size;
2156 int ret;
2157
2158 key.objectid = bytenr;
2159 key.offset = (u64)-1;
2160 key.type = BTRFS_EXTENT_ITEM_KEY;
2161
2162 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2163 if (ret < 0)
2164 goto out;
2165 BUG_ON(ret == 0);
2166
2167 ret = -ENOENT;
2168 if (path->slots[0] == 0)
2169 goto out;
2170
2171 path->slots[0]--;
2172 leaf = path->nodes[0];
2173 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2174
2175 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2176 goto out;
2177
2178 ret = 1;
2179 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2180 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2181 if (item_size < sizeof(*ei)) {
2182 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2183 goto out;
2184 }
2185 #endif
2186 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2187
2188 if (item_size != sizeof(*ei) +
2189 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2190 goto out;
2191
2192 if (btrfs_extent_generation(leaf, ei) <=
2193 btrfs_root_last_snapshot(&root->root_item))
2194 goto out;
2195
2196 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2197 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2198 BTRFS_EXTENT_DATA_REF_KEY)
2199 goto out;
2200
2201 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2202 if (btrfs_extent_refs(leaf, ei) !=
2203 btrfs_extent_data_ref_count(leaf, ref) ||
2204 btrfs_extent_data_ref_root(leaf, ref) !=
2205 root->root_key.objectid ||
2206 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2207 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2208 goto out;
2209
2210 ret = 0;
2211 out:
2212 return ret;
2213 }
2214
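/*
 * returns 0 if the data extent at bytenr is referenced only by this
 * root/inode/offset; any non-zero return means the extent is, or must
 * be assumed to be, shared. Both the committed extent tree and the
 * pending delayed refs are consulted.
 */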
2215 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2216 struct btrfs_root *root,
2217 u64 objectid, u64 offset, u64 bytenr)
2218 {
2219 struct btrfs_path *path;
2220 int ret;
2221 int ret2;
2222
2223 path = btrfs_alloc_path();
2224 if (!path)
2225 return -ENOMEM;
2226
2227 do {
2228 ret = check_committed_ref(trans, root, path, objectid,
2229 offset, bytenr);
2230 if (ret && ret != -ENOENT)
2231 goto out;
2232
2233 ret2 = check_delayed_ref(trans, root, path, objectid,
2234 offset, bytenr);
2235 } while (ret2 == -EAGAIN);
2236
2237 if (ret2 && ret2 != -ENOENT) {
2238 ret = ret2;
2239 goto out;
2240 }
2241
2242 if (ret != -ENOENT || ret2 != -ENOENT)
2243 ret = 0;
2244 out:
2245 btrfs_free_path(path);
2246 return ret;
2247 }
2248
2249 #if 0
2250 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2251 struct extent_buffer *buf, u32 nr_extents)
2252 {
2253 struct btrfs_key key;
2254 struct btrfs_file_extent_item *fi;
2255 u64 root_gen;
2256 u32 nritems;
2257 int i;
2258 int level;
2259 int ret = 0;
2260 int shared = 0;
2261
2262 if (!root->ref_cows)
2263 return 0;
2264
2265 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2266 shared = 0;
2267 root_gen = root->root_key.offset;
2268 } else {
2269 shared = 1;
2270 root_gen = trans->transid - 1;
2271 }
2272
2273 level = btrfs_header_level(buf);
2274 nritems = btrfs_header_nritems(buf);
2275
2276 if (level == 0) {
2277 struct btrfs_leaf_ref *ref;
2278 struct btrfs_extent_info *info;
2279
2280 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2281 if (!ref) {
2282 ret = -ENOMEM;
2283 goto out;
2284 }
2285
2286 ref->root_gen = root_gen;
2287 ref->bytenr = buf->start;
2288 ref->owner = btrfs_header_owner(buf);
2289 ref->generation = btrfs_header_generation(buf);
2290 ref->nritems = nr_extents;
2291 info = ref->extents;
2292
2293 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2294 u64 disk_bytenr;
2295 btrfs_item_key_to_cpu(buf, &key, i);
2296 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2297 continue;
2298 fi = btrfs_item_ptr(buf, i,
2299 struct btrfs_file_extent_item);
2300 if (btrfs_file_extent_type(buf, fi) ==
2301 BTRFS_FILE_EXTENT_INLINE)
2302 continue;
2303 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2304 if (disk_bytenr == 0)
2305 continue;
2306
2307 info->bytenr = disk_bytenr;
2308 info->num_bytes =
2309 btrfs_file_extent_disk_num_bytes(buf, fi);
2310 info->objectid = key.objectid;
2311 info->offset = key.offset;
2312 info++;
2313 }
2314
2315 ret = btrfs_add_leaf_ref(root, ref, shared);
2316 if (ret == -EEXIST && shared) {
2317 struct btrfs_leaf_ref *old;
2318 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2319 BUG_ON(!old);
2320 btrfs_remove_leaf_ref(root, old);
2321 btrfs_free_leaf_ref(root, old);
2322 ret = btrfs_add_leaf_ref(root, ref, shared);
2323 }
2324 WARN_ON(ret);
2325 btrfs_free_leaf_ref(root, ref);
2326 }
2327 out:
2328 return ret;
2329 }
2330
2331 /* when a block goes through cow, we update the reference counts of
2332 * everything that block points to. The internal pointers of the block
2333 * can be in just about any order, and it is likely to have clusters of
2334 * things that are close together and clusters of things that are not.
2335 *
2336 * To help reduce the seeks that come with updating all of these reference
2337 * counts, sort them by byte number before actual updates are done.
2338 *
2339 * struct refsort is used to match byte number to slot in the btree block.
2340 * we sort based on the byte number and then use the slot to actually
2341 * find the item.
2342 *
2343 * struct refsort is smaller than struct btrfs_item and smaller than
2344 * struct btrfs_key_ptr. Since we're currently limited to the page size
2345 * for a btree block, there's no way for a kmalloc of refsorts for a
2346 * single node to be bigger than a page.
2347 */
2348 struct refsort {
2349 u64 bytenr;
2350 u32 slot;
2351 };
2352
2353 /*
2354 * for passing into sort()
2355 */
2356 static int refsort_cmp(const void *a_void, const void *b_void)
2357 {
2358 const struct refsort *a = a_void;
2359 const struct refsort *b = b_void;
2360
2361 if (a->bytenr < b->bytenr)
2362 return -1;
2363 if (a->bytenr > b->bytenr)
2364 return 1;
2365 return 0;
2366 }
2367 #endif
2368
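/*
 * walk every pointer in 'buf' and add or drop one reference on each
 * extent it points to: file extents for a leaf, child blocks for a
 * node. full_backref chooses whether the refs are parented by this
 * block or by the owning root.
 */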
2369 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2370 struct btrfs_root *root,
2371 struct extent_buffer *buf,
2372 int full_backref, int inc)
2373 {
2374 u64 bytenr;
2375 u64 num_bytes;
2376 u64 parent;
2377 u64 ref_root;
2378 u32 nritems;
2379 struct btrfs_key key;
2380 struct btrfs_file_extent_item *fi;
2381 int i;
2382 int level;
2383 int ret = 0;
2384 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2385 u64, u64, u64, u64, u64, u64);
2386
2387 ref_root = btrfs_header_owner(buf);
2388 nritems = btrfs_header_nritems(buf);
2389 level = btrfs_header_level(buf);
2390
2391 if (!root->ref_cows && level == 0)
2392 return 0;
2393
2394 if (inc)
2395 process_func = btrfs_inc_extent_ref;
2396 else
2397 process_func = btrfs_free_extent;
2398
2399 if (full_backref)
2400 parent = buf->start;
2401 else
2402 parent = 0;
2403
2404 for (i = 0; i < nritems; i++) {
2405 if (level == 0) {
2406 btrfs_item_key_to_cpu(buf, &key, i);
2407 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2408 continue;
2409 fi = btrfs_item_ptr(buf, i,
2410 struct btrfs_file_extent_item);
2411 if (btrfs_file_extent_type(buf, fi) ==
2412 BTRFS_FILE_EXTENT_INLINE)
2413 continue;
2414 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2415 if (bytenr == 0)
2416 continue;
2417
2418 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2419 key.offset -= btrfs_file_extent_offset(buf, fi);
2420 ret = process_func(trans, root, bytenr, num_bytes,
2421 parent, ref_root, key.objectid,
2422 key.offset);
2423 if (ret)
2424 goto fail;
2425 } else {
2426 bytenr = btrfs_node_blockptr(buf, i);
2427 num_bytes = btrfs_level_size(root, level - 1);
2428 ret = process_func(trans, root, bytenr, num_bytes,
2429 parent, ref_root, level - 1, 0);
2430 if (ret)
2431 goto fail;
2432 }
2433 }
2434 return 0;
2435 fail:
2436 BUG();
2437 return ret;
2438 }
2439
2440 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2441 struct extent_buffer *buf, int full_backref)
2442 {
2443 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2444 }
2445
2446 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2447 struct extent_buffer *buf, int full_backref)
2448 {
2449 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2450 }
2451
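/*
 * write the block group item for 'cache' back into the extent tree.
 */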
2452 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2453 struct btrfs_root *root,
2454 struct btrfs_path *path,
2455 struct btrfs_block_group_cache *cache)
2456 {
2457 int ret;
2458 struct btrfs_root *extent_root = root->fs_info->extent_root;
2459 unsigned long bi;
2460 struct extent_buffer *leaf;
2461
2462 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2463 if (ret < 0)
2464 goto fail;
2465 BUG_ON(ret);
2466
2467 leaf = path->nodes[0];
2468 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2469 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2470 btrfs_mark_buffer_dirty(leaf);
2471 btrfs_release_path(extent_root, path);
2472 fail:
2473 if (ret)
2474 return ret;
2475 return 0;
2476
2477 }
2478
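/*
 * return the block group that follows 'cache' in the rb tree, or NULL
 * at the end. The reference on 'cache' is dropped and a new one is
 * taken on the group returned.
 */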
2479 static struct btrfs_block_group_cache *
2480 next_block_group(struct btrfs_root *root,
2481 struct btrfs_block_group_cache *cache)
2482 {
2483 struct rb_node *node;
2484 spin_lock(&root->fs_info->block_group_cache_lock);
2485 node = rb_next(&cache->cache_node);
2486 btrfs_put_block_group(cache);
2487 if (node) {
2488 cache = rb_entry(node, struct btrfs_block_group_cache,
2489 cache_node);
2490 atomic_inc(&cache->count);
2491 } else
2492 cache = NULL;
2493 spin_unlock(&root->fs_info->block_group_cache_lock);
2494 return cache;
2495 }
2496
2497 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2498 struct btrfs_root *root)
2499 {
2500 struct btrfs_block_group_cache *cache;
2501 int err = 0;
2502 struct btrfs_path *path;
2503 u64 last = 0;
2504
2505 path = btrfs_alloc_path();
2506 if (!path)
2507 return -ENOMEM;
2508
2509 while (1) {
2510 if (last == 0) {
2511 err = btrfs_run_delayed_refs(trans, root,
2512 (unsigned long)-1);
2513 BUG_ON(err);
2514 }
2515
2516 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2517 while (cache) {
2518 if (cache->dirty)
2519 break;
2520 cache = next_block_group(root, cache);
2521 }
2522 if (!cache) {
2523 if (last == 0)
2524 break;
2525 last = 0;
2526 continue;
2527 }
2528
2529 cache->dirty = 0;
2530 last = cache->key.objectid + cache->key.offset;
2531
2532 err = write_one_cache_group(trans, root, path, cache);
2533 BUG_ON(err);
2534 btrfs_put_block_group(cache);
2535 }
2536
2537 btrfs_free_path(path);
2538 return 0;
2539 }
2540
2541 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2542 {
2543 struct btrfs_block_group_cache *block_group;
2544 int readonly = 0;
2545
2546 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2547 if (!block_group || block_group->ro)
2548 readonly = 1;
2549 if (block_group)
2550 btrfs_put_block_group(block_group);
2551 return readonly;
2552 }
2553
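/*
 * find the space_info matching 'flags' and fold total_bytes/bytes_used
 * into its counters, allocating and initializing a new space_info if
 * none exists yet.
 */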
2554 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2555 u64 total_bytes, u64 bytes_used,
2556 struct btrfs_space_info **space_info)
2557 {
2558 struct btrfs_space_info *found;
2559
2560 found = __find_space_info(info, flags);
2561 if (found) {
2562 spin_lock(&found->lock);
2563 found->total_bytes += total_bytes;
2564 found->bytes_used += bytes_used;
2565 found->full = 0;
2566 spin_unlock(&found->lock);
2567 *space_info = found;
2568 return 0;
2569 }
2570 found = kzalloc(sizeof(*found), GFP_NOFS);
2571 if (!found)
2572 return -ENOMEM;
2573
2574 INIT_LIST_HEAD(&found->block_groups);
2575 init_rwsem(&found->groups_sem);
2576 spin_lock_init(&found->lock);
2577 found->flags = flags;
2578 found->total_bytes = total_bytes;
2579 found->bytes_used = bytes_used;
2580 found->bytes_pinned = 0;
2581 found->bytes_reserved = 0;
2582 found->bytes_readonly = 0;
2583 found->bytes_delalloc = 0;
2584 found->full = 0;
2585 found->force_alloc = 0;
2586 *space_info = found;
2587 list_add_rcu(&found->list, &info->space_info);
2588 atomic_set(&found->caching_threads, 0);
2589 return 0;
2590 }
2591
2592 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2593 {
2594 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2595 BTRFS_BLOCK_GROUP_RAID1 |
2596 BTRFS_BLOCK_GROUP_RAID10 |
2597 BTRFS_BLOCK_GROUP_DUP);
2598 if (extra_flags) {
2599 if (flags & BTRFS_BLOCK_GROUP_DATA)
2600 fs_info->avail_data_alloc_bits |= extra_flags;
2601 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2602 fs_info->avail_metadata_alloc_bits |= extra_flags;
2603 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2604 fs_info->avail_system_alloc_bits |= extra_flags;
2605 }
2606 }
2607
2608 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2609 {
2610 spin_lock(&cache->space_info->lock);
2611 spin_lock(&cache->lock);
2612 if (!cache->ro) {
2613 cache->space_info->bytes_readonly += cache->key.offset -
2614 btrfs_block_group_used(&cache->item);
2615 cache->ro = 1;
2616 }
2617 spin_unlock(&cache->lock);
2618 spin_unlock(&cache->space_info->lock);
2619 }
2620
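/*
 * strip allocation profile bits that the available rw devices can't
 * satisfy, then reduce redundant combinations (RAID10 beats RAID1,
 * RAID1/RAID10 beat DUP, anything beats RAID0) to a single profile.
 */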
2621 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2622 {
2623 u64 num_devices = root->fs_info->fs_devices->rw_devices;
2624
2625 if (num_devices == 1)
2626 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2627 if (num_devices < 4)
2628 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2629
2630 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2631 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2632 BTRFS_BLOCK_GROUP_RAID10))) {
2633 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2634 }
2635
2636 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2637 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2638 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2639 }
2640
2641 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2642 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2643 (flags & BTRFS_BLOCK_GROUP_RAID10) |
2644 (flags & BTRFS_BLOCK_GROUP_DUP)))
2645 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2646 return flags;
2647 }
2648
2649 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2650 {
2651 struct btrfs_fs_info *info = root->fs_info;
2652 u64 alloc_profile;
2653
2654 if (data) {
2655 alloc_profile = info->avail_data_alloc_bits &
2656 info->data_alloc_profile;
2657 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2658 } else if (root == root->fs_info->chunk_root) {
2659 alloc_profile = info->avail_system_alloc_bits &
2660 info->system_alloc_profile;
2661 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2662 } else {
2663 alloc_profile = info->avail_metadata_alloc_bits &
2664 info->metadata_alloc_profile;
2665 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2666 }
2667
2668 return btrfs_reduce_alloc_profile(root, data);
2669 }
2670
2671 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2672 {
2673 u64 alloc_target;
2674
2675 alloc_target = btrfs_get_alloc_profile(root, 1);
2676 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2677 alloc_target);
2678 }
2679
2680 /*
2681 * for now this just makes sure we have at least 5% of our metadata space free
2682 * for use.
2683 */
2684 int btrfs_check_metadata_free_space(struct btrfs_root *root)
2685 {
2686 struct btrfs_fs_info *info = root->fs_info;
2687 struct btrfs_space_info *meta_sinfo;
2688 u64 alloc_target, thresh;
2689 int committed = 0, ret;
2690
2691 /* get the space info for where the metadata will live */
2692 alloc_target = btrfs_get_alloc_profile(root, 0);
2693 meta_sinfo = __find_space_info(info, alloc_target);
2694
2695 again:
2696 spin_lock(&meta_sinfo->lock);
2697 if (!meta_sinfo->full)
2698 thresh = meta_sinfo->total_bytes * 80;
2699 else
2700 thresh = meta_sinfo->total_bytes * 95;
2701
2702 do_div(thresh, 100);
2703
2704 if (meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2705 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly > thresh) {
2706 struct btrfs_trans_handle *trans;
2707 if (!meta_sinfo->full) {
2708 meta_sinfo->force_alloc = 1;
2709 spin_unlock(&meta_sinfo->lock);
2710
2711 trans = btrfs_start_transaction(root, 1);
2712 if (!trans)
2713 return -ENOMEM;
2714
2715 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2716 2 * 1024 * 1024, alloc_target, 0);
2717 btrfs_end_transaction(trans, root);
2718 goto again;
2719 }
2720 spin_unlock(&meta_sinfo->lock);
2721
2722 if (!committed) {
2723 committed = 1;
2724 trans = btrfs_join_transaction(root, 1);
2725 if (!trans)
2726 return -ENOMEM;
2727 ret = btrfs_commit_transaction(trans, root);
2728 if (ret)
2729 return ret;
2730 goto again;
2731 }
2732 return -ENOSPC;
2733 }
2734 spin_unlock(&meta_sinfo->lock);
2735
2736 return 0;
2737 }
2738
2739 /*
2740 * This will check the space that the inode allocates from to make sure we have
2741 * enough space for bytes.
2742 */
2743 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
2744 u64 bytes)
2745 {
2746 struct btrfs_space_info *data_sinfo;
2747 int ret = 0, committed = 0;
2748
2749 /* make sure bytes are sectorsize aligned */
2750 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2751
2752 data_sinfo = BTRFS_I(inode)->space_info;
2753 again:
2754 /* make sure we have enough space to handle the data first */
2755 spin_lock(&data_sinfo->lock);
2756 if (data_sinfo->total_bytes - data_sinfo->bytes_used -
2757 data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
2758 data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
2759 data_sinfo->bytes_may_use < bytes) {
2760 struct btrfs_trans_handle *trans;
2761
2762 /*
2763 * if we don't have enough free bytes in this space then we need
2764 * to alloc a new chunk.
2765 */
2766 if (!data_sinfo->full) {
2767 u64 alloc_target;
2768
2769 data_sinfo->force_alloc = 1;
2770 spin_unlock(&data_sinfo->lock);
2771
2772 alloc_target = btrfs_get_alloc_profile(root, 1);
2773 trans = btrfs_start_transaction(root, 1);
2774 if (!trans)
2775 return -ENOMEM;
2776
2777 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2778 bytes + 2 * 1024 * 1024,
2779 alloc_target, 0);
2780 btrfs_end_transaction(trans, root);
2781 if (ret)
2782 return ret;
2783 goto again;
2784 }
2785 spin_unlock(&data_sinfo->lock);
2786
2787 /* commit the current transaction and try again */
2788 if (!committed) {
2789 committed = 1;
2790 trans = btrfs_join_transaction(root, 1);
2791 if (!trans)
2792 return -ENOMEM;
2793 ret = btrfs_commit_transaction(trans, root);
2794 if (ret)
2795 return ret;
2796 goto again;
2797 }
2798
2799 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
2800 ", %llu bytes_used, %llu bytes_reserved, "
2801 "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
2802 "%llu total\n", (unsigned long long)bytes,
2803 (unsigned long long)data_sinfo->bytes_delalloc,
2804 (unsigned long long)data_sinfo->bytes_used,
2805 (unsigned long long)data_sinfo->bytes_reserved,
2806 (unsigned long long)data_sinfo->bytes_pinned,
2807 (unsigned long long)data_sinfo->bytes_readonly,
2808 (unsigned long long)data_sinfo->bytes_may_use,
2809 (unsigned long long)data_sinfo->total_bytes);
2810 return -ENOSPC;
2811 }
2812 data_sinfo->bytes_may_use += bytes;
2813 BTRFS_I(inode)->reserved_bytes += bytes;
2814 spin_unlock(&data_sinfo->lock);
2815
2816 return btrfs_check_metadata_free_space(root);
2817 }
2818
2819 /*
2820 * if there was an error for whatever reason after calling
2821 * btrfs_check_data_free_space, call this so we can clean up the counters.
2822 */
2823 void btrfs_free_reserved_data_space(struct btrfs_root *root,
2824 struct inode *inode, u64 bytes)
2825 {
2826 struct btrfs_space_info *data_sinfo;
2827
2828 /* make sure bytes are sectorsize aligned */
2829 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
2830
2831 data_sinfo = BTRFS_I(inode)->space_info;
2832 spin_lock(&data_sinfo->lock);
2833 data_sinfo->bytes_may_use -= bytes;
2834 BTRFS_I(inode)->reserved_bytes -= bytes;
2835 spin_unlock(&data_sinfo->lock);
2836 }
2837
2838 /* called when we are adding a delalloc extent to the inode's io_tree */
2839 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
2840 u64 bytes)
2841 {
2842 struct btrfs_space_info *data_sinfo;
2843
2844 /* get the space info for where this inode will be storing its data */
2845 data_sinfo = BTRFS_I(inode)->space_info;
2846
2847 /* make sure we have enough space to handle the data first */
2848 spin_lock(&data_sinfo->lock);
2849 data_sinfo->bytes_delalloc += bytes;
2850
2851 /*
2852 * we are adding a delalloc extent without calling
2853 * btrfs_check_data_free_space first. This happens on a weird
2854 * writepage condition, but shouldn't hurt our accounting
2855 */
2856 if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
2857 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
2858 BTRFS_I(inode)->reserved_bytes = 0;
2859 } else {
2860 data_sinfo->bytes_may_use -= bytes;
2861 BTRFS_I(inode)->reserved_bytes -= bytes;
2862 }
2863
2864 spin_unlock(&data_sinfo->lock);
2865 }
2866
2867 /* called when we are clearing a delalloc extent from the inode's io_tree */
2868 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
2869 u64 bytes)
2870 {
2871 struct btrfs_space_info *info;
2872
2873 info = BTRFS_I(inode)->space_info;
2874
2875 spin_lock(&info->lock);
2876 info->bytes_delalloc -= bytes;
2877 spin_unlock(&info->lock);
2878 }
2879
2880 static void force_metadata_allocation(struct btrfs_fs_info *info)
2881 {
2882 struct list_head *head = &info->space_info;
2883 struct btrfs_space_info *found;
2884
2885 rcu_read_lock();
2886 list_for_each_entry_rcu(found, head, list) {
2887 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
2888 found->force_alloc = 1;
2889 }
2890 rcu_read_unlock();
2891 }
2892
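/*
 * allocate a new chunk with the profile in 'flags'. Unless 'force' or
 * the space_info's force_alloc flag is set, this is a no-op while the
 * space is under roughly 60% committed; if the chunk allocation fails
 * the space_info is marked full.
 */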
2893 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
2894 struct btrfs_root *extent_root, u64 alloc_bytes,
2895 u64 flags, int force)
2896 {
2897 struct btrfs_space_info *space_info;
2898 struct btrfs_fs_info *fs_info = extent_root->fs_info;
2899 u64 thresh;
2900 int ret = 0;
2901
2902 mutex_lock(&fs_info->chunk_mutex);
2903
2904 flags = btrfs_reduce_alloc_profile(extent_root, flags);
2905
2906 space_info = __find_space_info(extent_root->fs_info, flags);
2907 if (!space_info) {
2908 ret = update_space_info(extent_root->fs_info, flags,
2909 0, 0, &space_info);
2910 BUG_ON(ret);
2911 }
2912 BUG_ON(!space_info);
2913
2914 spin_lock(&space_info->lock);
2915 if (space_info->force_alloc) {
2916 force = 1;
2917 space_info->force_alloc = 0;
2918 }
2919 if (space_info->full) {
2920 spin_unlock(&space_info->lock);
2921 goto out;
2922 }
2923
2924 thresh = space_info->total_bytes - space_info->bytes_readonly;
2925 thresh = div_factor(thresh, 6);
2926 if (!force &&
2927 (space_info->bytes_used + space_info->bytes_pinned +
2928 space_info->bytes_reserved + alloc_bytes) < thresh) {
2929 spin_unlock(&space_info->lock);
2930 goto out;
2931 }
2932 spin_unlock(&space_info->lock);
2933
2934 /*
2935 * if we're doing a data chunk, go ahead and make sure that
2936 * we keep a reasonable number of metadata chunks allocated in the
2937 * FS as well.
2938 */
2939 if (flags & BTRFS_BLOCK_GROUP_DATA) {
2940 fs_info->data_chunk_allocations++;
2941 if (!(fs_info->data_chunk_allocations %
2942 fs_info->metadata_ratio))
2943 force_metadata_allocation(fs_info);
2944 }
2945
2946 ret = btrfs_alloc_chunk(trans, extent_root, flags);
2947 if (ret)
2948 space_info->full = 1;
2949 out:
2950 mutex_unlock(&extent_root->fs_info->chunk_mutex);
2951 return ret;
2952 }
2953
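/*
 * update the used-byte accounting in the super block, the root item
 * and every block group that [bytenr, bytenr + num_bytes) touches.
 * When freeing with mark_free set, the range is also discarded and
 * returned to the free space cache.
 */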
2954 static int update_block_group(struct btrfs_trans_handle *trans,
2955 struct btrfs_root *root,
2956 u64 bytenr, u64 num_bytes, int alloc,
2957 int mark_free)
2958 {
2959 struct btrfs_block_group_cache *cache;
2960 struct btrfs_fs_info *info = root->fs_info;
2961 u64 total = num_bytes;
2962 u64 old_val;
2963 u64 byte_in_group;
2964
2965 /* block accounting for super block */
2966 spin_lock(&info->delalloc_lock);
2967 old_val = btrfs_super_bytes_used(&info->super_copy);
2968 if (alloc)
2969 old_val += num_bytes;
2970 else
2971 old_val -= num_bytes;
2972 btrfs_set_super_bytes_used(&info->super_copy, old_val);
2973
2974 /* block accounting for root item */
2975 old_val = btrfs_root_used(&root->root_item);
2976 if (alloc)
2977 old_val += num_bytes;
2978 else
2979 old_val -= num_bytes;
2980 btrfs_set_root_used(&root->root_item, old_val);
2981 spin_unlock(&info->delalloc_lock);
2982
2983 while (total) {
2984 cache = btrfs_lookup_block_group(info, bytenr);
2985 if (!cache)
2986 return -1;
2987 byte_in_group = bytenr - cache->key.objectid;
2988 WARN_ON(byte_in_group > cache->key.offset);
2989
2990 spin_lock(&cache->space_info->lock);
2991 spin_lock(&cache->lock);
2992 cache->dirty = 1;
2993 old_val = btrfs_block_group_used(&cache->item);
2994 num_bytes = min(total, cache->key.offset - byte_in_group);
2995 if (alloc) {
2996 old_val += num_bytes;
2997 cache->space_info->bytes_used += num_bytes;
2998 if (cache->ro)
2999 cache->space_info->bytes_readonly -= num_bytes;
3000 btrfs_set_block_group_used(&cache->item, old_val);
3001 spin_unlock(&cache->lock);
3002 spin_unlock(&cache->space_info->lock);
3003 } else {
3004 old_val -= num_bytes;
3005 cache->space_info->bytes_used -= num_bytes;
3006 if (cache->ro)
3007 cache->space_info->bytes_readonly += num_bytes;
3008 btrfs_set_block_group_used(&cache->item, old_val);
3009 spin_unlock(&cache->lock);
3010 spin_unlock(&cache->space_info->lock);
3011 if (mark_free) {
3012 int ret;
3013
3014 ret = btrfs_discard_extent(root, bytenr,
3015 num_bytes);
3016 WARN_ON(ret);
3017
3018 ret = btrfs_add_free_space(cache, bytenr,
3019 num_bytes);
3020 WARN_ON(ret);
3021 }
3022 }
3023 btrfs_put_block_group(cache);
3024 total -= num_bytes;
3025 bytenr += num_bytes;
3026 }
3027 return 0;
3028 }
3029
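/*
 * return the logical start of the first block group at or after
 * search_start, so allocation searches can be clamped to space that
 * actually exists.
 */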
3030 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3031 {
3032 struct btrfs_block_group_cache *cache;
3033 u64 bytenr;
3034
3035 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3036 if (!cache)
3037 return 0;
3038
3039 bytenr = cache->key.objectid;
3040 btrfs_put_block_group(cache);
3041
3042 return bytenr;
3043 }
3044
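/*
 * pin or unpin [bytenr, bytenr + num) and keep the per-block-group
 * and per-space pinned counters in sync. Unpinned bytes only go back
 * to the free space cache once the block group has finished caching;
 * otherwise we just kick off the async caching and catch the extent
 * on a later pass.
 */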
3045 int btrfs_update_pinned_extents(struct btrfs_root *root,
3046 u64 bytenr, u64 num, int pin)
3047 {
3048 u64 len;
3049 struct btrfs_block_group_cache *cache;
3050 struct btrfs_fs_info *fs_info = root->fs_info;
3051
3052 if (pin)
3053 set_extent_dirty(&fs_info->pinned_extents,
3054 bytenr, bytenr + num - 1, GFP_NOFS);
3055
3056 while (num > 0) {
3057 cache = btrfs_lookup_block_group(fs_info, bytenr);
3058 BUG_ON(!cache);
3059 len = min(num, cache->key.offset -
3060 (bytenr - cache->key.objectid));
3061 if (pin) {
3062 spin_lock(&cache->space_info->lock);
3063 spin_lock(&cache->lock);
3064 cache->pinned += len;
3065 cache->space_info->bytes_pinned += len;
3066 spin_unlock(&cache->lock);
3067 spin_unlock(&cache->space_info->lock);
3068 fs_info->total_pinned += len;
3069 } else {
3070 int unpin = 0;
3071
3072 /*
3073 * in order to not race with the block group caching, we
3074 * only want to unpin the extent if we are cached. If
3075 * we aren't cached, we want to start async caching this
3076 * block group so we can free the extent the next time
3077 * around.
3078 */
3079 spin_lock(&cache->space_info->lock);
3080 spin_lock(&cache->lock);
3081 unpin = (cache->cached == BTRFS_CACHE_FINISHED);
3082 if (likely(unpin)) {
3083 cache->pinned -= len;
3084 cache->space_info->bytes_pinned -= len;
3085 fs_info->total_pinned -= len;
3086 }
3087 spin_unlock(&cache->lock);
3088 spin_unlock(&cache->space_info->lock);
3089
3090 if (likely(unpin))
3091 clear_extent_dirty(&fs_info->pinned_extents,
3092 bytenr, bytenr + len -1,
3093 GFP_NOFS);
3094 else
3095 cache_block_group(cache);
3096
3097 if (unpin)
3098 btrfs_add_free_space(cache, bytenr, len);
3099 }
3100 btrfs_put_block_group(cache);
3101 bytenr += len;
3102 num -= len;
3103 }
3104 return 0;
3105 }
3106
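/*
 * move [bytenr, bytenr + num) in or out of the reserved counters of
 * every block group it spans.
 */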
3107 static int update_reserved_extents(struct btrfs_root *root,
3108 u64 bytenr, u64 num, int reserve)
3109 {
3110 u64 len;
3111 struct btrfs_block_group_cache *cache;
3112 struct btrfs_fs_info *fs_info = root->fs_info;
3113
3114 while (num > 0) {
3115 cache = btrfs_lookup_block_group(fs_info, bytenr);
3116 BUG_ON(!cache);
3117 len = min(num, cache->key.offset -
3118 (bytenr - cache->key.objectid));
3119
3120 spin_lock(&cache->space_info->lock);
3121 spin_lock(&cache->lock);
3122 if (reserve) {
3123 cache->reserved += len;
3124 cache->space_info->bytes_reserved += len;
3125 } else {
3126 cache->reserved -= len;
3127 cache->space_info->bytes_reserved -= len;
3128 }
3129 spin_unlock(&cache->lock);
3130 spin_unlock(&cache->space_info->lock);
3131 btrfs_put_block_group(cache);
3132 bytenr += len;
3133 num -= len;
3134 }
3135 return 0;
3136 }
3137
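/*
 * copy the currently pinned extents into 'copy'. The commit code
 * presumably uses this to unpin from a stable snapshot of the pinned
 * set rather than from the live tree.
 */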
3138 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
3139 {
3140 u64 last = 0;
3141 u64 start;
3142 u64 end;
3143 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
3144 int ret;
3145
3146 while (1) {
3147 ret = find_first_extent_bit(pinned_extents, last,
3148 &start, &end, EXTENT_DIRTY);
3149 if (ret)
3150 break;
3151
3152 set_extent_dirty(copy, start, end, GFP_NOFS);
3153 last = end + 1;
3154 }
3155 return 0;
3156 }
3157
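/*
 * at the end of a transaction commit, walk the 'unpin' tree, discard
 * each range and unpin it so the space becomes allocatable again.
 */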
3158 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3159 struct btrfs_root *root,
3160 struct extent_io_tree *unpin)
3161 {
3162 u64 start;
3163 u64 end;
3164 int ret;
3165
3166 while (1) {
3167 ret = find_first_extent_bit(unpin, 0, &start, &end,
3168 EXTENT_DIRTY);
3169 if (ret)
3170 break;
3171
3172 ret = btrfs_discard_extent(root, start, end + 1 - start);
3173
3174 /* unlocks the pinned mutex */
3175 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
3176 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3177
3178 cond_resched();
3179 }
3180
3181 return ret;
3182 }
3183
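/*
 * pin down the given range. As a special case, an unwritten tree block
 * from the current transaction can simply be reused: we return 1 and
 * hand the locked buffer back in *must_clean for the caller to clean
 * and free.
 */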
3184 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3185 struct btrfs_root *root,
3186 struct btrfs_path *path,
3187 u64 bytenr, u64 num_bytes, int is_data,
3188 struct extent_buffer **must_clean)
3189 {
3190 int err = 0;
3191 struct extent_buffer *buf;
3192
3193 if (is_data)
3194 goto pinit;
3195
3196 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3197 if (!buf)
3198 goto pinit;
3199
3200 /* we can reuse a block if it hasn't been written
3201 * and it is from this transaction. We can't
3202 * reuse anything from the tree log root because
3203 * it has tiny sub-transactions.
3204 */
3205 if (btrfs_buffer_uptodate(buf, 0) &&
3206 btrfs_try_tree_lock(buf)) {
3207 u64 header_owner = btrfs_header_owner(buf);
3208 u64 header_transid = btrfs_header_generation(buf);
3209 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3210 header_transid == trans->transid &&
3211 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3212 *must_clean = buf;
3213 return 1;
3214 }
3215 btrfs_tree_unlock(buf);
3216 }
3217 free_extent_buffer(buf);
3218 pinit:
3219 btrfs_set_path_blocking(path);
3220 /* unlocks the pinned mutex */
3221 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
3222
3223 BUG_ON(err < 0);
3224 return 0;
3225 }
3226
3227
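/*
 * drop refs_to_drop references to the extent at bytenr. While refs
 * remain only the backref item and the ref count are updated; once the
 * count hits zero the extent item itself is deleted, the bytes are
 * pinned (or handed back for reuse) and the block group accounting is
 * brought up to date.
 */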
3228 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3229 struct btrfs_root *root,
3230 u64 bytenr, u64 num_bytes, u64 parent,
3231 u64 root_objectid, u64 owner_objectid,
3232 u64 owner_offset, int refs_to_drop,
3233 struct btrfs_delayed_extent_op *extent_op)
3234 {
3235 struct btrfs_key key;
3236 struct btrfs_path *path;
3237 struct btrfs_fs_info *info = root->fs_info;
3238 struct btrfs_root *extent_root = info->extent_root;
3239 struct extent_buffer *leaf;
3240 struct btrfs_extent_item *ei;
3241 struct btrfs_extent_inline_ref *iref;
3242 int ret;
3243 int is_data;
3244 int extent_slot = 0;
3245 int found_extent = 0;
3246 int num_to_del = 1;
3247 u32 item_size;
3248 u64 refs;
3249
3250 path = btrfs_alloc_path();
3251 if (!path)
3252 return -ENOMEM;
3253
3254 path->reada = 1;
3255 path->leave_spinning = 1;
3256
3257 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3258 BUG_ON(!is_data && refs_to_drop != 1);
3259
3260 ret = lookup_extent_backref(trans, extent_root, path, &iref,
3261 bytenr, num_bytes, parent,
3262 root_objectid, owner_objectid,
3263 owner_offset);
3264 if (ret == 0) {
3265 extent_slot = path->slots[0];
3266 while (extent_slot >= 0) {
3267 btrfs_item_key_to_cpu(path->nodes[0], &key,
3268 extent_slot);
3269 if (key.objectid != bytenr)
3270 break;
3271 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3272 key.offset == num_bytes) {
3273 found_extent = 1;
3274 break;
3275 }
3276 if (path->slots[0] - extent_slot > 5)
3277 break;
3278 extent_slot--;
3279 }
3280 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3281 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3282 if (found_extent && item_size < sizeof(*ei))
3283 found_extent = 0;
3284 #endif
3285 if (!found_extent) {
3286 BUG_ON(iref);
3287 ret = remove_extent_backref(trans, extent_root, path,
3288 NULL, refs_to_drop,
3289 is_data);
3290 BUG_ON(ret);
3291 btrfs_release_path(extent_root, path);
3292 path->leave_spinning = 1;
3293
3294 key.objectid = bytenr;
3295 key.type = BTRFS_EXTENT_ITEM_KEY;
3296 key.offset = num_bytes;
3297
3298 ret = btrfs_search_slot(trans, extent_root,
3299 &key, path, -1, 1);
3300 if (ret) {
3301 printk(KERN_ERR "umm, got %d back from search"
3302 ", was looking for %llu\n", ret,
3303 (unsigned long long)bytenr);
3304 btrfs_print_leaf(extent_root, path->nodes[0]);
3305 }
3306 BUG_ON(ret);
3307 extent_slot = path->slots[0];
3308 }
3309 } else {
3310 btrfs_print_leaf(extent_root, path->nodes[0]);
3311 WARN_ON(1);
3312 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3313 "parent %llu root %llu owner %llu offset %llu\n",
3314 (unsigned long long)bytenr,
3315 (unsigned long long)parent,
3316 (unsigned long long)root_objectid,
3317 (unsigned long long)owner_objectid,
3318 (unsigned long long)owner_offset);
3319 }
3320
3321 leaf = path->nodes[0];
3322 item_size = btrfs_item_size_nr(leaf, extent_slot);
3323 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3324 if (item_size < sizeof(*ei)) {
3325 BUG_ON(found_extent || extent_slot != path->slots[0]);
3326 ret = convert_extent_item_v0(trans, extent_root, path,
3327 owner_objectid, 0);
3328 BUG_ON(ret < 0);
3329
3330 btrfs_release_path(extent_root, path);
3331 path->leave_spinning = 1;
3332
3333 key.objectid = bytenr;
3334 key.type = BTRFS_EXTENT_ITEM_KEY;
3335 key.offset = num_bytes;
3336
3337 ret = btrfs_search_slot(trans, extent_root, &key, path,
3338 -1, 1);
3339 if (ret) {
3340 printk(KERN_ERR "umm, got %d back from search"
3341 ", was looking for %llu\n", ret,
3342 (unsigned long long)bytenr);
3343 btrfs_print_leaf(extent_root, path->nodes[0]);
3344 }
3345 BUG_ON(ret);
3346 extent_slot = path->slots[0];
3347 leaf = path->nodes[0];
3348 item_size = btrfs_item_size_nr(leaf, extent_slot);
3349 }
3350 #endif
3351 BUG_ON(item_size < sizeof(*ei));
3352 ei = btrfs_item_ptr(leaf, extent_slot,
3353 struct btrfs_extent_item);
3354 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3355 struct btrfs_tree_block_info *bi;
3356 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3357 bi = (struct btrfs_tree_block_info *)(ei + 1);
3358 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3359 }
3360
3361 refs = btrfs_extent_refs(leaf, ei);
3362 BUG_ON(refs < refs_to_drop);
3363 refs -= refs_to_drop;
3364
3365 if (refs > 0) {
3366 if (extent_op)
3367 __run_delayed_extent_op(extent_op, leaf, ei);
3368 /*
3369 * In the case of inline back ref, reference count will
3370 * be updated by remove_extent_backref
3371 */
3372 if (iref) {
3373 BUG_ON(!found_extent);
3374 } else {
3375 btrfs_set_extent_refs(leaf, ei, refs);
3376 btrfs_mark_buffer_dirty(leaf);
3377 }
3378 if (found_extent) {
3379 ret = remove_extent_backref(trans, extent_root, path,
3380 iref, refs_to_drop,
3381 is_data);
3382 BUG_ON(ret);
3383 }
3384 } else {
3385 int mark_free = 0;
3386 struct extent_buffer *must_clean = NULL;
3387
3388 if (found_extent) {
3389 BUG_ON(is_data && refs_to_drop !=
3390 extent_data_ref_count(root, path, iref));
3391 if (iref) {
3392 BUG_ON(path->slots[0] != extent_slot);
3393 } else {
3394 BUG_ON(path->slots[0] != extent_slot + 1);
3395 path->slots[0] = extent_slot;
3396 num_to_del = 2;
3397 }
3398 }
3399
3400 ret = pin_down_bytes(trans, root, path, bytenr,
3401 num_bytes, is_data, &must_clean);
3402 if (ret > 0)
3403 mark_free = 1;
3404 BUG_ON(ret < 0);
3405 /*
3406 * it is going to be very rare for someone to be waiting
3407 * on the block we're freeing. del_items might need to
3408 * schedule, so rather than get fancy, just force it
3409 * to blocking here
3410 */
3411 if (must_clean)
3412 btrfs_set_lock_blocking(must_clean);
3413
3414 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3415 num_to_del);
3416 BUG_ON(ret);
3417 btrfs_release_path(extent_root, path);
3418
3419 if (must_clean) {
3420 clean_tree_block(NULL, root, must_clean);
3421 btrfs_tree_unlock(must_clean);
3422 free_extent_buffer(must_clean);
3423 }
3424
3425 if (is_data) {
3426 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3427 BUG_ON(ret);
3428 } else {
3429 invalidate_mapping_pages(info->btree_inode->i_mapping,
3430 bytenr >> PAGE_CACHE_SHIFT,
3431 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3432 }
3433
3434 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3435 mark_free);
3436 BUG_ON(ret);
3437 }
3438 btrfs_free_path(path);
3439 return ret;
3440 }
3441
3442 /*
3443 * when we free an extent, it is possible (and likely) that we free the last
3444 * delayed ref for that extent as well. This searches the delayed ref tree for
3445 * a given extent, and if there are no other delayed refs to be processed, it
3446 * removes it from the tree.
3447 */
3448 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3449 struct btrfs_root *root, u64 bytenr)
3450 {
3451 struct btrfs_delayed_ref_head *head;
3452 struct btrfs_delayed_ref_root *delayed_refs;
3453 struct btrfs_delayed_ref_node *ref;
3454 struct rb_node *node;
3455 int ret;
3456
3457 delayed_refs = &trans->transaction->delayed_refs;
3458 spin_lock(&delayed_refs->lock);
3459 head = btrfs_find_delayed_ref_head(trans, bytenr);
3460 if (!head)
3461 goto out;
3462
3463 node = rb_prev(&head->node.rb_node);
3464 if (!node)
3465 goto out;
3466
3467 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3468
3469 /* there are still entries for this ref, we can't drop it */
3470 if (ref->bytenr == bytenr)
3471 goto out;
3472
3473 if (head->extent_op) {
3474 if (!head->must_insert_reserved)
3475 goto out;
3476 kfree(head->extent_op);
3477 head->extent_op = NULL;
3478 }
3479
3480 /*
3481 * waiting for the lock here would deadlock. If someone else has it
3482 * locked they are already in the process of dropping it anyway
3483 */
3484 if (!mutex_trylock(&head->mutex))
3485 goto out;
3486
3487 /*
3488 * at this point we have a head with no other entries. Go
3489 * ahead and process it.
3490 */
3491 head->node.in_tree = 0;
3492 rb_erase(&head->node.rb_node, &delayed_refs->root);
3493
3494 delayed_refs->num_entries--;
3495
3496 /*
3497 * we don't take a ref on the node because we're removing it from the
3498 * tree, so we just steal the ref the tree was holding.
3499 */
3500 delayed_refs->num_heads--;
3501 if (list_empty(&head->cluster))
3502 delayed_refs->num_heads_ready--;
3503
3504 list_del_init(&head->cluster);
3505 spin_unlock(&delayed_refs->lock);
3506
3507 ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
3508 &head->node, head->extent_op,
3509 head->must_insert_reserved);
3510 BUG_ON(ret);
3511 btrfs_put_delayed_ref(&head->node);
3512 return 0;
3513 out:
3514 spin_unlock(&delayed_refs->lock);
3515 return 0;
3516 }
3517
3518 int btrfs_free_extent(struct btrfs_trans_handle *trans,
3519 struct btrfs_root *root,
3520 u64 bytenr, u64 num_bytes, u64 parent,
3521 u64 root_objectid, u64 owner, u64 offset)
3522 {
3523 int ret;
3524
3525 /*
3526 * tree log blocks never actually go into the extent allocation
3527 * tree, just update pinning info and exit early.
3528 */
3529 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
3530 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
3531 /* unlocks the pinned mutex */
3532 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
3533 update_reserved_extents(root, bytenr, num_bytes, 0);
3534 ret = 0;
3535 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
3536 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
3537 parent, root_objectid, (int)owner,
3538 BTRFS_DROP_DELAYED_REF, NULL);
3539 BUG_ON(ret);
3540 ret = check_ref_cleanup(trans, root, bytenr);
3541 BUG_ON(ret);
3542 } else {
3543 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3544 parent, root_objectid, owner,
3545 offset, BTRFS_DROP_DELAYED_REF, NULL);
3546 BUG_ON(ret);
3547 }
3548 return ret;
3549 }
3550
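/*
 * round 'val' up to a multiple of the stripe size. With a 64K
 * stripesize, for example, stripe_align(root, 64 * 1024 + 1) comes
 * back as 128K.
 */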
3551 static u64 stripe_align(struct btrfs_root *root, u64 val)
3552 {
3553 u64 mask = ((u64)root->stripesize - 1);
3554 u64 ret = (val + mask) & ~mask;
3555 return ret;
3556 }
3557
3558 /*
3559 * when we wait for progress in the block group caching, it's because
3560 * our allocation attempt failed at least once. So, we must sleep
3561 * and let some progress happen before we try again.
3562 *
3563 * This function will sleep at least once waiting for new free space to
3564 * show up, and then it will check the block group free space numbers
3565 * for our min num_bytes. Another option is to have it go ahead
3566 * and look in the rbtree for a free extent of a given size, but this
3567 * is a good start.
3568 */
3569 static noinline int
3570 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3571 u64 num_bytes)
3572 {
3573 DEFINE_WAIT(wait);
3574
3575 prepare_to_wait(&cache->caching_q, &wait, TASK_UNINTERRUPTIBLE);
3576
3577 if (block_group_cache_done(cache)) {
3578 finish_wait(&cache->caching_q, &wait);
3579 return 0;
3580 }
3581 schedule();
3582 finish_wait(&cache->caching_q, &wait);
3583
3584 wait_event(cache->caching_q, block_group_cache_done(cache) ||
3585 (cache->free_space >= num_bytes));
3586 return 0;
3587 }
3588
3589 enum btrfs_loop_type {
3590 LOOP_CACHED_ONLY = 0,
3591 LOOP_CACHING_NOWAIT = 1,
3592 LOOP_CACHING_WAIT = 2,
3593 LOOP_ALLOC_CHUNK = 3,
3594 LOOP_NO_EMPTY_SIZE = 4,
3595 };
3596
3597 /*
3598 * walks the btree of allocated extents and finds a hole of a given size.
3599 * The key ins is changed to record the hole:
3600 * ins->objectid == block start
3601 * ins->flags = BTRFS_EXTENT_ITEM_KEY
3602 * ins->offset == number of blocks
3603 * Any available blocks before search_start are skipped.
3604 */
3605 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
3606 struct btrfs_root *orig_root,
3607 u64 num_bytes, u64 empty_size,
3608 u64 search_start, u64 search_end,
3609 u64 hint_byte, struct btrfs_key *ins,
3610 u64 exclude_start, u64 exclude_nr,
3611 int data)
3612 {
3613 int ret = 0;
3614 struct btrfs_root *root = orig_root->fs_info->extent_root;
3615 struct btrfs_free_cluster *last_ptr = NULL;
3616 struct btrfs_block_group_cache *block_group = NULL;
3617 int empty_cluster = 2 * 1024 * 1024;
3618 int allowed_chunk_alloc = 0;
3619 struct btrfs_space_info *space_info;
3620 int last_ptr_loop = 0;
3621 int loop = 0;
3622 bool found_uncached_bg = false;
3623
3624 WARN_ON(num_bytes < root->sectorsize);
3625 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
3626 ins->objectid = 0;
3627 ins->offset = 0;
3628
3629 space_info = __find_space_info(root->fs_info, data);
3630
3631 if (orig_root->ref_cows || empty_size)
3632 allowed_chunk_alloc = 1;
3633
3634 if (data & BTRFS_BLOCK_GROUP_METADATA) {
3635 last_ptr = &root->fs_info->meta_alloc_cluster;
3636 if (!btrfs_test_opt(root, SSD))
3637 empty_cluster = 64 * 1024;
3638 }
3639
3640 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
3641 last_ptr = &root->fs_info->data_alloc_cluster;
3642 }
3643
3644 if (last_ptr) {
3645 spin_lock(&last_ptr->lock);
3646 if (last_ptr->block_group)
3647 hint_byte = last_ptr->window_start;
3648 spin_unlock(&last_ptr->lock);
3649 }
3650
3651 search_start = max(search_start, first_logical_byte(root, 0));
3652 search_start = max(search_start, hint_byte);
3653
3654 if (!last_ptr)
3655 empty_cluster = 0;
3656
3657 if (search_start == hint_byte) {
3658 block_group = btrfs_lookup_block_group(root->fs_info,
3659 search_start);
3660 /*
3661 * we don't want to use the block group if it doesn't match our
3662 * allocation bits, or if it's not cached.
3663 */
3664 if (block_group && block_group_bits(block_group, data) &&
3665 block_group_cache_done(block_group)) {
3666 down_read(&space_info->groups_sem);
3667 if (list_empty(&block_group->list) ||
3668 block_group->ro) {
3669 /*
3670 * someone is removing this block group, so
3671 * we can't jump to the have_block_group
3672 * label because our list pointers are not
3673 * valid
3674 */
3675 btrfs_put_block_group(block_group);
3676 up_read(&space_info->groups_sem);
3677 } else
3678 goto have_block_group;
3679 } else if (block_group) {
3680 btrfs_put_block_group(block_group);
3681 }
3682 }
3683
3684 search:
3685 down_read(&space_info->groups_sem);
3686 list_for_each_entry(block_group, &space_info->block_groups, list) {
3687 u64 offset;
3688 int cached;
3689
3690 atomic_inc(&block_group->count);
3691 search_start = block_group->key.objectid;
3692
3693 have_block_group:
3694 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
3695 /*
3696 * we want to start caching kthreads, but not too many
3697 * right off the bat so we don't overwhelm the system;
3698 * only start them if there are fewer than 2 and we're
3699 * in the initial allocation phase.
3700 */
3701 if (loop > LOOP_CACHING_NOWAIT ||
3702 atomic_read(&space_info->caching_threads) < 2) {
3703 ret = cache_block_group(block_group);
3704 BUG_ON(ret);
3705 }
3706 }
3707
3708 cached = block_group_cache_done(block_group);
3709 if (unlikely(!cached)) {
3710 found_uncached_bg = true;
3711
3712 /* if we only want cached bgs, loop */
3713 if (loop == LOOP_CACHED_ONLY)
3714 goto loop;
3715 }
3716
3717 if (unlikely(block_group->ro))
3718 goto loop;
3719
3720 if (last_ptr) {
3721 /*
3722 * the refill lock keeps out other
3723 * people trying to start a new cluster
3724 */
3725 spin_lock(&last_ptr->refill_lock);
3726 if (last_ptr->block_group &&
3727 (last_ptr->block_group->ro ||
3728 !block_group_bits(last_ptr->block_group, data))) {
3729 offset = 0;
3730 goto refill_cluster;
3731 }
3732
3733 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
3734 num_bytes, search_start);
3735 if (offset) {
3736 /* we have a block, we're done */
3737 spin_unlock(&last_ptr->refill_lock);
3738 goto checks;
3739 }
3740
3741 spin_lock(&last_ptr->lock);
3742 /*
3743 * whoops, this cluster doesn't actually point to
3744 * this block group. Get a ref on the block
3745 * group it does point to and try again
3746 */
3747 if (!last_ptr_loop && last_ptr->block_group &&
3748 last_ptr->block_group != block_group) {
3749
3750 btrfs_put_block_group(block_group);
3751 block_group = last_ptr->block_group;
3752 atomic_inc(&block_group->count);
3753 spin_unlock(&last_ptr->lock);
3754 spin_unlock(&last_ptr->refill_lock);
3755
3756 last_ptr_loop = 1;
3757 search_start = block_group->key.objectid;
3758 /*
3759 * we know this block group is properly
3760 * in the list because
3761 * btrfs_remove_block_group, drops the
3762 * cluster before it removes the block
3763 * group from the list
3764 */
3765 goto have_block_group;
3766 }
3767 spin_unlock(&last_ptr->lock);
3768 refill_cluster:
3769 /*
3770 * this cluster didn't work out, free it and
3771 * start over
3772 */
3773 btrfs_return_cluster_to_free_space(NULL, last_ptr);
3774
3775 last_ptr_loop = 0;
3776
3777 /* allocate a cluster in this block group */
3778 ret = btrfs_find_space_cluster(trans, root,
3779 block_group, last_ptr,
3780 offset, num_bytes,
3781 empty_cluster + empty_size);
3782 if (ret == 0) {
3783 /*
3784 * now pull our allocation out of this
3785 * cluster
3786 */
3787 offset = btrfs_alloc_from_cluster(block_group,
3788 last_ptr, num_bytes,
3789 search_start);
3790 if (offset) {
3791 /* we found one, proceed */
3792 spin_unlock(&last_ptr->refill_lock);
3793 goto checks;
3794 }
3795 } else if (!cached && loop > LOOP_CACHING_NOWAIT) {
3796 spin_unlock(&last_ptr->refill_lock);
3797
3798 wait_block_group_cache_progress(block_group,
3799 num_bytes + empty_cluster + empty_size);
3800 goto have_block_group;
3801 }
3802
3803 /*
3804 * at this point we either didn't find a cluster
3805 * or we weren't able to allocate a block from our
3806 * cluster. Free the cluster we've been trying
3807 * to use, and go to the next block group
3808 */
3809 if (loop < LOOP_NO_EMPTY_SIZE) {
3810 btrfs_return_cluster_to_free_space(NULL,
3811 last_ptr);
3812 spin_unlock(&last_ptr->refill_lock);
3813 goto loop;
3814 }
3815 spin_unlock(&last_ptr->refill_lock);
3816 }
3817
3818 offset = btrfs_find_space_for_alloc(block_group, search_start,
3819 num_bytes, empty_size);
3820 if (!offset && (cached || (!cached &&
3821 loop == LOOP_CACHING_NOWAIT))) {
3822 goto loop;
3823 } else if (!offset && (!cached &&
3824 loop > LOOP_CACHING_NOWAIT)) {
3825 wait_block_group_cache_progress(block_group,
3826 num_bytes + empty_size);
3827 goto have_block_group;
3828 }
3829 checks:
3830 search_start = stripe_align(root, offset);
3831 /* move on to the next group */
3832 if (search_start + num_bytes >= search_end) {
3833 btrfs_add_free_space(block_group, offset, num_bytes);
3834 goto loop;
3835 }
3836
3837 /* move on to the next group */
3838 if (search_start + num_bytes >
3839 block_group->key.objectid + block_group->key.offset) {
3840 btrfs_add_free_space(block_group, offset, num_bytes);
3841 goto loop;
3842 }
3843
3844 if (exclude_nr > 0 &&
3845 (search_start + num_bytes > exclude_start &&
3846 search_start < exclude_start + exclude_nr)) {
3847 search_start = exclude_start + exclude_nr;
3848
3849 btrfs_add_free_space(block_group, offset, num_bytes);
3850 /*
3851 * if search_start is still in this block group
3852 * then we just re-search this block group
3853 */
3854 if (search_start >= block_group->key.objectid &&
3855 search_start < (block_group->key.objectid +
3856 block_group->key.offset))
3857 goto have_block_group;
3858 goto loop;
3859 }
3860
3861 ins->objectid = search_start;
3862 ins->offset = num_bytes;
3863
3864 if (offset < search_start)
3865 btrfs_add_free_space(block_group, offset,
3866 search_start - offset);
3867 BUG_ON(offset > search_start);
3868
3869 /* we are all good, let's return */
3870 break;
3871 loop:
3872 btrfs_put_block_group(block_group);
3873 }
3874 up_read(&space_info->groups_sem);
3875
3876 /* LOOP_CACHED_ONLY, only search fully cached block groups
3877 * LOOP_CACHING_NOWAIT, search partially cached block groups, but
3878 * don't wait for them to finish caching
3879 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
3880 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
3881 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
3882 * again
3883 */
3884 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
3885 (found_uncached_bg || empty_size || empty_cluster ||
3886 allowed_chunk_alloc)) {
3887 if (found_uncached_bg) {
3888 found_uncached_bg = false;
3889 if (loop < LOOP_CACHING_WAIT) {
3890 loop++;
3891 goto search;
3892 }
3893 }
3894
3895 if (loop == LOOP_ALLOC_CHUNK) {
3896 empty_size = 0;
3897 empty_cluster = 0;
3898 }
3899
3900 if (allowed_chunk_alloc) {
3901 ret = do_chunk_alloc(trans, root, num_bytes +
3902 2 * 1024 * 1024, data, 1);
3903 allowed_chunk_alloc = 0;
3904 } else {
3905 space_info->force_alloc = 1;
3906 }
3907
3908 if (loop < LOOP_NO_EMPTY_SIZE) {
3909 loop++;
3910 goto search;
3911 }
3912 ret = -ENOSPC;
3913 } else if (!ins->objectid) {
3914 ret = -ENOSPC;
3915 }
3916
3917 /* we found what we needed */
3918 if (ins->objectid) {
3919 if (!(data & BTRFS_BLOCK_GROUP_DATA))
3920 trans->block_group = block_group->key.objectid;
3921
3922 btrfs_put_block_group(block_group);
3923 ret = 0;
3924 }
3925
3926 return ret;
3927 }
3928
3929 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3930 {
3931 struct btrfs_block_group_cache *cache;
3932
3933 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3934 (unsigned long long)(info->total_bytes - info->bytes_used -
3935 info->bytes_pinned - info->bytes_reserved),
3936 (info->full) ? "" : "not ");
3937 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
3938 " may_use=%llu, used=%llu\n",
3939 (unsigned long long)info->total_bytes,
3940 (unsigned long long)info->bytes_pinned,
3941 (unsigned long long)info->bytes_delalloc,
3942 (unsigned long long)info->bytes_may_use,
3943 (unsigned long long)info->bytes_used);
3944
3945 down_read(&info->groups_sem);
3946 list_for_each_entry(cache, &info->block_groups, list) {
3947 spin_lock(&cache->lock);
3948 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3949 "%llu pinned %llu reserved\n",
3950 (unsigned long long)cache->key.objectid,
3951 (unsigned long long)cache->key.offset,
3952 (unsigned long long)btrfs_block_group_used(&cache->item),
3953 (unsigned long long)cache->pinned,
3954 (unsigned long long)cache->reserved);
3955 btrfs_dump_free_space(cache, bytes);
3956 spin_unlock(&cache->lock);
3957 }
3958 up_read(&info->groups_sem);
3959 }
3960
3961 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3962 struct btrfs_root *root,
3963 u64 num_bytes, u64 min_alloc_size,
3964 u64 empty_size, u64 hint_byte,
3965 u64 search_end, struct btrfs_key *ins,
3966 u64 data)
3967 {
3968 int ret;
3969 u64 search_start = 0;
3970 struct btrfs_fs_info *info = root->fs_info;
3971
3972 data = btrfs_get_alloc_profile(root, data);
3973 again:
3974 /*
3975 * the only place that sets empty_size is btrfs_realloc_node, which
3976 * is not called recursively on allocations
3977 */
3978 if (empty_size || root->ref_cows) {
3979 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3980 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3981 2 * 1024 * 1024,
3982 BTRFS_BLOCK_GROUP_METADATA |
3983 (info->metadata_alloc_profile &
3984 info->avail_metadata_alloc_bits), 0);
3985 }
3986 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3987 num_bytes + 2 * 1024 * 1024, data, 0);
3988 }
3989
3990 WARN_ON(num_bytes < root->sectorsize);
3991 ret = find_free_extent(trans, root, num_bytes, empty_size,
3992 search_start, search_end, hint_byte, ins,
3993 trans->alloc_exclude_start,
3994 trans->alloc_exclude_nr, data);
3995
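/*
 * allocation backoff: if the request failed with ENOSPC and is still
 * larger than min_alloc_size, halve it (kept sector aligned and no
 * smaller than min_alloc_size), force a chunk allocation and retry.
 * e.g. a 4MiB request that keeps hitting ENOSPC is retried at 2MiB,
 * 1MiB and so on until it reaches min_alloc_size.
 */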
3996 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3997 num_bytes = num_bytes >> 1;
3998 num_bytes = num_bytes & ~(root->sectorsize - 1);
3999 num_bytes = max(num_bytes, min_alloc_size);
4000 do_chunk_alloc(trans, root->fs_info->extent_root,
4001 num_bytes, data, 1);
4002 goto again;
4003 }
4004 if (ret == -ENOSPC) {
4005 struct btrfs_space_info *sinfo;
4006
4007 sinfo = __find_space_info(root->fs_info, data);
4008 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4009 "wanted %llu\n", (unsigned long long)data,
4010 (unsigned long long)num_bytes);
4011 dump_space_info(sinfo, num_bytes);
4012 }
4013
4014 return ret;
4015 }
4016
4017 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4018 {
4019 struct btrfs_block_group_cache *cache;
4020 int ret = 0;
4021
4022 cache = btrfs_lookup_block_group(root->fs_info, start);
4023 if (!cache) {
4024 printk(KERN_ERR "Unable to find block group for %llu\n",
4025 (unsigned long long)start);
4026 return -ENOSPC;
4027 }
4028
4029 ret = btrfs_discard_extent(root, start, len);
4030
4031 btrfs_add_free_space(cache, start, len);
4032 btrfs_put_block_group(cache);
4033 update_reserved_extents(root, start, len, 0);
4034
4035 return ret;
4036 }
4037
4038 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4039 struct btrfs_root *root,
4040 u64 num_bytes, u64 min_alloc_size,
4041 u64 empty_size, u64 hint_byte,
4042 u64 search_end, struct btrfs_key *ins,
4043 u64 data)
4044 {
4045 int ret;
4046 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
4047 empty_size, hint_byte, search_end, ins,
4048 data);
4049 if (!ret)
4050 update_reserved_extents(root, ins->objectid, ins->offset, 1);
4051
4052 return ret;
4053 }
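#if 0
/*
 * illustrative sketch only (not compiled): a minimal reserve/release
 * round trip using the two helpers above.  the 4096 byte sizes and the
 * data profile are arbitrary; trans and root are assumed to be a
 * running transaction and a valid fs root.
 */
static int reserve_extent_example(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	struct btrfs_key ins;
	int ret;

	ret = btrfs_reserve_extent(trans, root, 4096, 4096, 0, 0,
				   (u64)-1, &ins, BTRFS_BLOCK_GROUP_DATA);
	if (ret)
		return ret;

	/* ins.objectid is the extent start, ins.offset its length */
	return btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
}
#endif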
4054
4055 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4056 struct btrfs_root *root,
4057 u64 parent, u64 root_objectid,
4058 u64 flags, u64 owner, u64 offset,
4059 struct btrfs_key *ins, int ref_mod)
4060 {
4061 int ret;
4062 struct btrfs_fs_info *fs_info = root->fs_info;
4063 struct btrfs_extent_item *extent_item;
4064 struct btrfs_extent_inline_ref *iref;
4065 struct btrfs_path *path;
4066 struct extent_buffer *leaf;
4067 int type;
4068 u32 size;
4069
4070 if (parent > 0)
4071 type = BTRFS_SHARED_DATA_REF_KEY;
4072 else
4073 type = BTRFS_EXTENT_DATA_REF_KEY;
4074
4075 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4076
4077 path = btrfs_alloc_path();
4078 BUG_ON(!path);
4079
4080 path->leave_spinning = 1;
4081 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4082 ins, size);
4083 BUG_ON(ret);
4084
4085 leaf = path->nodes[0];
4086 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4087 struct btrfs_extent_item);
4088 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4089 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4090 btrfs_set_extent_flags(leaf, extent_item,
4091 flags | BTRFS_EXTENT_FLAG_DATA);
4092
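/*
 * the inline backref lives directly after the extent item in the same
 * leaf item.  for a shared ref the parent bytenr goes into iref->offset
 * and a shared_data_ref (just a count) follows the whole iref; for a
 * normal ref the extent_data_ref body starts at iref->offset
 */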
4093 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4094 btrfs_set_extent_inline_ref_type(leaf, iref, type);
4095 if (parent > 0) {
4096 struct btrfs_shared_data_ref *ref;
4097 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4098 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4099 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4100 } else {
4101 struct btrfs_extent_data_ref *ref;
4102 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4103 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4104 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4105 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4106 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4107 }
4108
4109 btrfs_mark_buffer_dirty(path->nodes[0]);
4110 btrfs_free_path(path);
4111
4112 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4113 1, 0);
4114 if (ret) {
4115 printk(KERN_ERR "btrfs update block group failed for %llu "
4116 "%llu\n", (unsigned long long)ins->objectid,
4117 (unsigned long long)ins->offset);
4118 BUG();
4119 }
4120 return ret;
4121 }
4122
4123 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4124 struct btrfs_root *root,
4125 u64 parent, u64 root_objectid,
4126 u64 flags, struct btrfs_disk_key *key,
4127 int level, struct btrfs_key *ins)
4128 {
4129 int ret;
4130 struct btrfs_fs_info *fs_info = root->fs_info;
4131 struct btrfs_extent_item *extent_item;
4132 struct btrfs_tree_block_info *block_info;
4133 struct btrfs_extent_inline_ref *iref;
4134 struct btrfs_path *path;
4135 struct extent_buffer *leaf;
4136 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
4137
4138 path = btrfs_alloc_path();
4139 BUG_ON(!path);
4140
4141 path->leave_spinning = 1;
4142 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4143 ins, size);
4144 BUG_ON(ret);
4145
4146 leaf = path->nodes[0];
4147 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4148 struct btrfs_extent_item);
4149 btrfs_set_extent_refs(leaf, extent_item, 1);
4150 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4151 btrfs_set_extent_flags(leaf, extent_item,
4152 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4153 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4154
4155 btrfs_set_tree_block_key(leaf, block_info, key);
4156 btrfs_set_tree_block_level(leaf, block_info, level);
4157
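/*
 * tree blocks carry a btrfs_tree_block_info (key + level) between the
 * extent item and the inline backref, matching the size computed above
 */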
4158 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4159 if (parent > 0) {
4160 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4161 btrfs_set_extent_inline_ref_type(leaf, iref,
4162 BTRFS_SHARED_BLOCK_REF_KEY);
4163 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4164 } else {
4165 btrfs_set_extent_inline_ref_type(leaf, iref,
4166 BTRFS_TREE_BLOCK_REF_KEY);
4167 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4168 }
4169
4170 btrfs_mark_buffer_dirty(leaf);
4171 btrfs_free_path(path);
4172
4173 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4174 1, 0);
4175 if (ret) {
4176 printk(KERN_ERR "btrfs update block group failed for %llu "
4177 "%llu\n", (unsigned long long)ins->objectid,
4178 (unsigned long long)ins->offset);
4179 BUG();
4180 }
4181 return ret;
4182 }
4183
4184 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4185 struct btrfs_root *root,
4186 u64 root_objectid, u64 owner,
4187 u64 offset, struct btrfs_key *ins)
4188 {
4189 int ret;
4190
4191 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4192
4193 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4194 0, root_objectid, owner, offset,
4195 BTRFS_ADD_DELAYED_EXTENT, NULL);
4196 return ret;
4197 }
4198
4199 /*
4200 * this is used by the tree logging recovery code. It records that
4201 * an extent has been allocated and makes sure to clear the free
4202 * space cache bits as well
4203 */
4204 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4205 struct btrfs_root *root,
4206 u64 root_objectid, u64 owner, u64 offset,
4207 struct btrfs_key *ins)
4208 {
4209 int ret;
4210 struct btrfs_block_group_cache *block_group;
4211
4212 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4213 cache_block_group(block_group);
4214 wait_event(block_group->caching_q,
4215 block_group_cache_done(block_group));
4216
4217 ret = btrfs_remove_free_space(block_group, ins->objectid,
4218 ins->offset);
4219 BUG_ON(ret);
4220 btrfs_put_block_group(block_group);
4221 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4222 0, owner, offset, ins, 1);
4223 return ret;
4224 }
4225
4226 /*
4227 * finds a free extent and does all the dirty work required for the
4228 * allocation. returns the key for the extent through ins; the tree
4229 * buffer for the first block is created separately by the caller.
4230 *
4231 * returns 0 if everything worked, non-zero otherwise.
4232 */
4233 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4234 struct btrfs_root *root,
4235 u64 num_bytes, u64 parent, u64 root_objectid,
4236 struct btrfs_disk_key *key, int level,
4237 u64 empty_size, u64 hint_byte, u64 search_end,
4238 struct btrfs_key *ins)
4239 {
4240 int ret;
4241 u64 flags = 0;
4242
4243 ret = __btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4244 empty_size, hint_byte, search_end,
4245 ins, 0);
4246 if (ret)
4247 return ret;
4248
4249 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4250 if (parent == 0)
4251 parent = ins->objectid;
4252 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4253 } else
4254 BUG_ON(parent > 0);
4255
4256 update_reserved_extents(root, ins->objectid, ins->offset, 1);
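/*
 * queue a delayed ref to insert the extent item.  the key, flags and
 * level recorded in the extent_op are applied when the delayed ref
 * is actually run
 */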
4257 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4258 struct btrfs_delayed_extent_op *extent_op;
4259 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4260 BUG_ON(!extent_op);
4261 if (key)
4262 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4263 else
4264 memset(&extent_op->key, 0, sizeof(extent_op->key));
4265 extent_op->flags_to_set = flags;
4266 extent_op->update_key = 1;
4267 extent_op->update_flags = 1;
4268 extent_op->is_data = 0;
4269
4270 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4271 ins->offset, parent, root_objectid,
4272 level, BTRFS_ADD_DELAYED_EXTENT,
4273 extent_op);
4274 BUG_ON(ret);
4275 }
4276 return ret;
4277 }
4278
4279 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4280 struct btrfs_root *root,
4281 u64 bytenr, u32 blocksize,
4282 int level)
4283 {
4284 struct extent_buffer *buf;
4285
4286 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4287 if (!buf)
4288 return ERR_PTR(-ENOMEM);
4289 btrfs_set_header_generation(buf, trans->transid);
4290 btrfs_set_buffer_lockdep_class(buf, level);
4291 btrfs_tree_lock(buf);
4292 clean_tree_block(trans, root, buf);
4293
4294 btrfs_set_lock_blocking(buf);
4295 btrfs_set_buffer_uptodate(buf);
4296
4297 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4298 set_extent_dirty(&root->dirty_log_pages, buf->start,
4299 buf->start + buf->len - 1, GFP_NOFS);
4300 } else {
4301 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4302 buf->start + buf->len - 1, GFP_NOFS);
4303 }
4304 trans->blocks_used++;
4305 /* this returns a buffer locked for blocking */
4306 return buf;
4307 }
4308
4309 /*
4310 * helper function to allocate a block for a given tree
4311 * returns the tree buffer or an ERR_PTR on failure.
4312 */
4313 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4314 struct btrfs_root *root, u32 blocksize,
4315 u64 parent, u64 root_objectid,
4316 struct btrfs_disk_key *key, int level,
4317 u64 hint, u64 empty_size)
4318 {
4319 struct btrfs_key ins;
4320 int ret;
4321 struct extent_buffer *buf;
4322
4323 ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4324 key, level, empty_size, hint, (u64)-1, &ins);
4325 if (ret) {
4326 BUG_ON(ret > 0);
4327 return ERR_PTR(ret);
4328 }
4329
4330 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4331 blocksize, level);
4332 return buf;
4333 }
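#if 0
/*
 * illustrative sketch only (not compiled): allocating a level 0 tree
 * block with the helper above.  parent/key/hint/empty_size of zero are
 * arbitrary, and root->leafsize is assumed to be the wanted blocksize.
 * the buffer comes back locked for blocking.
 */
static struct extent_buffer *alloc_leaf_example(
				struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	struct extent_buffer *buf;

	buf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
				     root->root_key.objectid, NULL, 0,
				     0, 0);
	if (IS_ERR(buf))
		return buf;

	/* unlock once the caller has filled in the new block */
	btrfs_tree_unlock(buf);
	return buf;
}
#endif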
4334
4335 #if 0
4336 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
4337 struct btrfs_root *root, struct extent_buffer *leaf)
4338 {
4339 u64 disk_bytenr;
4340 u64 num_bytes;
4341 struct btrfs_key key;
4342 struct btrfs_file_extent_item *fi;
4343 u32 nritems;
4344 int i;
4345 int ret;
4346
4347 BUG_ON(!btrfs_is_leaf(leaf));
4348 nritems = btrfs_header_nritems(leaf);
4349
4350 for (i = 0; i < nritems; i++) {
4351 cond_resched();
4352 btrfs_item_key_to_cpu(leaf, &key, i);
4353
4354 /* only extents have references, skip everything else */
4355 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
4356 continue;
4357
4358 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
4359
4360 /* inline extents live in the btree, they don't have refs */
4361 if (btrfs_file_extent_type(leaf, fi) ==
4362 BTRFS_FILE_EXTENT_INLINE)
4363 continue;
4364
4365 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
4366
4367 /* holes don't have refs */
4368 if (disk_bytenr == 0)
4369 continue;
4370
4371 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
4372 ret = btrfs_free_extent(trans, root, disk_bytenr, num_bytes,
4373 leaf->start, 0, key.objectid, 0);
4374 BUG_ON(ret);
4375 }
4376 return 0;
4377 }
4378
4379 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
4380 struct btrfs_root *root,
4381 struct btrfs_leaf_ref *ref)
4382 {
4383 int i;
4384 int ret;
4385 struct btrfs_extent_info *info;
4386 struct refsort *sorted;
4387
4388 if (ref->nritems == 0)
4389 return 0;
4390
4391 sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
if (!sorted)
return -ENOMEM;
4392 for (i = 0; i < ref->nritems; i++) {
4393 sorted[i].bytenr = ref->extents[i].bytenr;
4394 sorted[i].slot = i;
4395 }
4396 sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
4397
4398 /*
4399 * we just sorted the extents by bytenr above, so the frees are
4400 * issued in order and batch nicely in the extent allocation tree
4401 */
4402 for (i = 0; i < ref->nritems; i++) {
4403 info = ref->extents + sorted[i].slot;
4404 ret = btrfs_free_extent(trans, root, info->bytenr,
4405 info->num_bytes, ref->bytenr,
4406 ref->owner, ref->generation,
4407 info->objectid, 0);
4408
4409 atomic_inc(&root->fs_info->throttle_gen);
4410 wake_up(&root->fs_info->transaction_throttle);
4411 cond_resched();
4412
4413 BUG_ON(ret);
4414 info++;
4415 }
4416
4417 kfree(sorted);
4418 return 0;
4419 }
4420
4421
4422 static int drop_snap_lookup_refcount(struct btrfs_trans_handle *trans,
4423 struct btrfs_root *root, u64 start,
4424 u64 len, u32 *refs)
4425 {
4426 int ret;
4427
4428 ret = btrfs_lookup_extent_refs(trans, root, start, len, refs);
4429 BUG_ON(ret);
4430
4431 #if 0 /* some debugging code in case we see problems here */
4432 /* if the refs count is one, it won't get increased again. But
4433 * if the ref count is > 1, someone may be decreasing it at
4434 * the same time we are.
4435 */
4436 if (*refs != 1) {
4437 struct extent_buffer *eb = NULL;
4438 eb = btrfs_find_create_tree_block(root, start, len);
4439 if (eb)
4440 btrfs_tree_lock(eb);
4441
4442 mutex_lock(&root->fs_info->alloc_mutex);
4443 ret = lookup_extent_ref(NULL, root, start, len, refs);
4444 BUG_ON(ret);
4445 mutex_unlock(&root->fs_info->alloc_mutex);
4446
4447 if (eb) {
4448 btrfs_tree_unlock(eb);
4449 free_extent_buffer(eb);
4450 }
4451 if (*refs == 1) {
4452 printk(KERN_ERR "btrfs block %llu went down to one "
4453 "during drop_snap\n", (unsigned long long)start);
4454 }
4455
4456 }
4457 #endif
4458
4459 cond_resched();
4460 return ret;
4461 }
4462
4463
4464 /*
4465 * this is used while deleting old snapshots, and it drops the refs
4466 * on a whole subtree starting from a level 1 node.
4467 *
4468 * The idea is to sort all the leaf pointers, and then drop the
4469 * ref on all the leaves in order. Most of the time the leaves
4470 * will have ref cache entries, so no leaf IOs will be required to
4471 * find the extents they have references on.
4472 *
4473 * For each leaf, any references it has are also dropped in order
4474 *
4475 * This ends up dropping the references in something close to optimal
4476 * order for reading and modifying the extent allocation tree.
4477 */
4478 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
4479 struct btrfs_root *root,
4480 struct btrfs_path *path)
4481 {
4482 u64 bytenr;
4483 u64 root_owner;
4484 u64 root_gen;
4485 struct extent_buffer *eb = path->nodes[1];
4486 struct extent_buffer *leaf;
4487 struct btrfs_leaf_ref *ref;
4488 struct refsort *sorted = NULL;
4489 int nritems = btrfs_header_nritems(eb);
4490 int ret;
4491 int i;
4492 int refi = 0;
4493 int slot = path->slots[1];
4494 u32 blocksize = btrfs_level_size(root, 0);
4495 u32 refs;
4496
4497 if (nritems == 0)
4498 goto out;
4499
4500 root_owner = btrfs_header_owner(eb);
4501 root_gen = btrfs_header_generation(eb);
4502 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
if (!sorted)
return -ENOMEM;
4503
4504 /*
4505 * step one, sort all the leaf pointers so we don't scribble
4506 * randomly into the extent allocation tree
4507 */
4508 for (i = slot; i < nritems; i++) {
4509 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
4510 sorted[refi].slot = i;
4511 refi++;
4512 }
4513
4514 /*
4515 * nritems won't be zero, but if we're picking up drop_snapshot
4516 * after a crash, slot might be > 0, so double check things
4517 * just in case.
4518 */
4519 if (refi == 0)
4520 goto out;
4521
4522 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
4523
4524 /*
4525 * the first loop frees everything the leaves point to
4526 */
4527 for (i = 0; i < refi; i++) {
4528 u64 ptr_gen;
4529
4530 bytenr = sorted[i].bytenr;
4531
4532 /*
4533 * check the reference count on this leaf. If it is > 1
4534 * we just decrement it below and don't update any
4535 * of the refs the leaf points to.
4536 */
4537 ret = drop_snap_lookup_refcount(trans, root, bytenr,
4538 blocksize, &refs);
4539 BUG_ON(ret);
4540 if (refs != 1)
4541 continue;
4542
4543 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
4544
4545 /*
4546 * the leaf only had one reference, which means the
4547 * only thing pointing to this leaf is the snapshot
4548 * we're deleting. It isn't possible for the reference
4549 * count to increase again later
4550 *
4551 * The reference cache is checked for the leaf,
4552 * and if found we'll be able to drop any refs held by
4553 * the leaf without needing to read it in.
4554 */
4555 ref = btrfs_lookup_leaf_ref(root, bytenr);
4556 if (ref && ref->generation != ptr_gen) {
4557 btrfs_free_leaf_ref(root, ref);
4558 ref = NULL;
4559 }
4560 if (ref) {
4561 ret = cache_drop_leaf_ref(trans, root, ref);
4562 BUG_ON(ret);
4563 btrfs_remove_leaf_ref(root, ref);
4564 btrfs_free_leaf_ref(root, ref);
4565 } else {
4566 /*
4567 * the leaf wasn't in the reference cache, so
4568 * we have to read it.
4569 */
4570 leaf = read_tree_block(root, bytenr, blocksize,
4571 ptr_gen);
4572 ret = btrfs_drop_leaf_ref(trans, root, leaf);
4573 BUG_ON(ret);
4574 free_extent_buffer(leaf);
4575 }
4576 atomic_inc(&root->fs_info->throttle_gen);
4577 wake_up(&root->fs_info->transaction_throttle);
4578 cond_resched();
4579 }
4580
4581 /*
4582 * run through the loop again to free the refs on the leaves.
4583 * This is faster than doing it in the loop above because
4584 * the leaves are likely to be clustered together. We end up
4585 * working in nice chunks on the extent allocation tree.
4586 */
4587 for (i = 0; i < refi; i++) {
4588 bytenr = sorted[i].bytenr;
4589 ret = btrfs_free_extent(trans, root, bytenr,
4590 blocksize, eb->start,
4591 root_owner, root_gen, 0, 1);
4592 BUG_ON(ret);
4593
4594 atomic_inc(&root->fs_info->throttle_gen);
4595 wake_up(&root->fs_info->transaction_throttle);
4596 cond_resched();
4597 }
4598 out:
4599 kfree(sorted);
4600
4601 /*
4602 * update the path to show we've processed the entire level 1
4603 * node. This will get saved into the root's drop_snapshot_progress
4604 * field so these drops are not repeated again if this transaction
4605 * commits.
4606 */
4607 path->slots[1] = nritems;
4608 return 0;
4609 }
4610
4611 /*
4612 * helper function for drop_snapshot, this walks down the tree dropping ref
4613 * counts as it goes.
4614 */
4615 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4616 struct btrfs_root *root,
4617 struct btrfs_path *path, int *level)
4618 {
4619 u64 root_owner;
4620 u64 root_gen;
4621 u64 bytenr;
4622 u64 ptr_gen;
4623 struct extent_buffer *next;
4624 struct extent_buffer *cur;
4625 struct extent_buffer *parent;
4626 u32 blocksize;
4627 int ret;
4628 u32 refs;
4629
4630 WARN_ON(*level < 0);
4631 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4632 ret = drop_snap_lookup_refcount(trans, root, path->nodes[*level]->start,
4633 path->nodes[*level]->len, &refs);
4634 BUG_ON(ret);
4635 if (refs > 1)
4636 goto out;
4637
4638 /*
4639 * walk down to the last node level and free all the leaves
4640 */
4641 while (*level >= 0) {
4642 WARN_ON(*level < 0);
4643 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4644 cur = path->nodes[*level];
4645
4646 if (btrfs_header_level(cur) != *level)
4647 WARN_ON(1);
4648
4649 if (path->slots[*level] >=
4650 btrfs_header_nritems(cur))
4651 break;
4652
4653 /* the new code goes down to level 1 and does all the
4654 * leaves pointed to by that node in bulk. So, this check
4655 * for level 0 will always be false.
4656 *
4657 * But, the disk format allows the drop_snapshot_progress
4658 * field in the root to leave things in a state where
4659 * a leaf will need cleaning up here. If someone crashes
4660 * with the old code and then boots with the new code,
4661 * we might find a leaf here.
4662 */
4663 if (*level == 0) {
4664 ret = btrfs_drop_leaf_ref(trans, root, cur);
4665 BUG_ON(ret);
4666 break;
4667 }
4668
4669 /*
4670 * once we get to level one, process the whole node
4671 * at once, including everything below it.
4672 */
4673 if (*level == 1) {
4674 ret = drop_level_one_refs(trans, root, path);
4675 BUG_ON(ret);
4676 break;
4677 }
4678
4679 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
4680 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
4681 blocksize = btrfs_level_size(root, *level - 1);
4682
4683 ret = drop_snap_lookup_refcount(trans, root, bytenr,
4684 blocksize, &refs);
4685 BUG_ON(ret);
4686
4687 /*
4688 * if there is more than one reference, we don't need
4689 * to read that node to drop any references it has. We
4690 * just drop the ref we hold on that node and move on to the
4691 * next slot in this level.
4692 */
4693 if (refs != 1) {
4694 parent = path->nodes[*level];
4695 root_owner = btrfs_header_owner(parent);
4696 root_gen = btrfs_header_generation(parent);
4697 path->slots[*level]++;
4698
4699 ret = btrfs_free_extent(trans, root, bytenr,
4700 blocksize, parent->start,
4701 root_owner, root_gen,
4702 *level - 1, 1);
4703 BUG_ON(ret);
4704
4705 atomic_inc(&root->fs_info->throttle_gen);
4706 wake_up(&root->fs_info->transaction_throttle);
4707 cond_resched();
4708
4709 continue;
4710 }
4711
4712 /*
4713 * we need to keep freeing things in the next level down.
4714 * read the block and loop around to process it
4715 */
4716 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
4717 WARN_ON(*level <= 0);
4718 if (path->nodes[*level-1])
4719 free_extent_buffer(path->nodes[*level-1]);
4720 path->nodes[*level-1] = next;
4721 *level = btrfs_header_level(next);
4722 path->slots[*level] = 0;
4723 cond_resched();
4724 }
4725 out:
4726 WARN_ON(*level < 0);
4727 WARN_ON(*level >= BTRFS_MAX_LEVEL);
4728
4729 if (path->nodes[*level] == root->node) {
4730 parent = path->nodes[*level];
4731 bytenr = path->nodes[*level]->start;
4732 } else {
4733 parent = path->nodes[*level + 1];
4734 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
4735 }
4736
4737 blocksize = btrfs_level_size(root, *level);
4738 root_owner = btrfs_header_owner(parent);
4739 root_gen = btrfs_header_generation(parent);
4740
4741 /*
4742 * cleanup and free the reference on the last node
4743 * we processed
4744 */
4745 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
4746 parent->start, root_owner, root_gen,
4747 *level, 1);
4748 free_extent_buffer(path->nodes[*level]);
4749 path->nodes[*level] = NULL;
4750
4751 *level += 1;
4752 BUG_ON(ret);
4753
4754 cond_resched();
4755 return 0;
4756 }
4757 #endif
4758
4759 struct walk_control {
4760 u64 refs[BTRFS_MAX_LEVEL];	/* cached ref counts per level */
4761 u64 flags[BTRFS_MAX_LEVEL];	/* cached extent flags per level */
4762 struct btrfs_key update_progress;	/* cursor for backref updates */
4763 int stage;	/* DROP_REFERENCE or UPDATE_BACKREF */
4764 int level;	/* level currently being processed */
4765 int shared_level;	/* level where a shared block was found */
4766 int update_ref;	/* update backrefs for shared subtrees? */
4767 int keep_locks;	/* keep path locks while walking */
4768 };
4769
4770 #define DROP_REFERENCE 1
4771 #define UPDATE_BACKREF 2
4772
4773 /*
4774 * helper to process a tree block while walking down the tree.
4775 *
4776 * when wc->stage == DROP_REFERENCE, this function checks
4777 * reference count of the block. if the block is shared and
4778 * we need to update back refs for the subtree rooted at the
4779 * block, this function changes wc->stage to UPDATE_BACKREF
4780 *
4781 * when wc->stage == UPDATE_BACKREF, this function updates
4782 * back refs for pointers in the block.
4783 *
4784 * NOTE: return value 1 means we should stop walking down.
4785 */
4786 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4787 struct btrfs_root *root,
4788 struct btrfs_path *path,
4789 struct walk_control *wc)
4790 {
4791 int level = wc->level;
4792 struct extent_buffer *eb = path->nodes[level];
4793 struct btrfs_key key;
4794 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4795 int ret;
4796
4797 if (wc->stage == UPDATE_BACKREF &&
4798 btrfs_header_owner(eb) != root->root_key.objectid)
4799 return 1;
4800
4801 /*
4802 * when the reference count of a tree block is 1, it won't increase
4803 * again. once the full backref flag is set, we never clear it.
4804 */
4805 if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4806 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4807 BUG_ON(!path->locks[level]);
4808 ret = btrfs_lookup_extent_info(trans, root,
4809 eb->start, eb->len,
4810 &wc->refs[level],
4811 &wc->flags[level]);
4812 BUG_ON(ret);
4813 BUG_ON(wc->refs[level] == 0);
4814 }
4815
4816 if (wc->stage == DROP_REFERENCE &&
4817 wc->update_ref && wc->refs[level] > 1) {
4818 BUG_ON(eb == root->node);
4819 BUG_ON(path->slots[level] > 0);
4820 if (level == 0)
4821 btrfs_item_key_to_cpu(eb, &key, path->slots[level]);
4822 else
4823 btrfs_node_key_to_cpu(eb, &key, path->slots[level]);
4824 if (btrfs_header_owner(eb) == root->root_key.objectid &&
4825 btrfs_comp_cpu_keys(&key, &wc->update_progress) >= 0) {
4826 wc->stage = UPDATE_BACKREF;
4827 wc->shared_level = level;
4828 }
4829 }
4830
4831 if (wc->stage == DROP_REFERENCE) {
4832 if (wc->refs[level] > 1)
4833 return 1;
4834
4835 if (path->locks[level] && !wc->keep_locks) {
4836 btrfs_tree_unlock(eb);
4837 path->locks[level] = 0;
4838 }
4839 return 0;
4840 }
4841
4842 /* wc->stage == UPDATE_BACKREF */
4843 if (!(wc->flags[level] & flag)) {
4844 BUG_ON(!path->locks[level]);
4845 ret = btrfs_inc_ref(trans, root, eb, 1);
4846 BUG_ON(ret);
4847 ret = btrfs_dec_ref(trans, root, eb, 0);
4848 BUG_ON(ret);
4849 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4850 eb->len, flag, 0);
4851 BUG_ON(ret);
4852 wc->flags[level] |= flag;
4853 }
4854
4855 /*
4856 * the block is shared by multiple trees, so it's not good to
4857 * keep the tree lock
4858 */
4859 if (path->locks[level] && level > 0) {
4860 btrfs_tree_unlock(eb);
4861 path->locks[level] = 0;
4862 }
4863 return 0;
4864 }
4865
4866 /*
4867 * helper to process a tree block while walking up the tree.
4868 *
4869 * when wc->stage == DROP_REFERENCE, this function drops
4870 * reference count on the block.
4871 *
4872 * when wc->stage == UPDATE_BACKREF, this function changes
4873 * wc->stage back to DROP_REFERENCE if we changed wc->stage
4874 * to UPDATE_BACKREF previously while processing the block.
4875 *
4876 * NOTE: return value 1 means we should stop walking up.
4877 */
4878 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
4879 struct btrfs_root *root,
4880 struct btrfs_path *path,
4881 struct walk_control *wc)
4882 {
4883 int ret = 0;
4884 int level = wc->level;
4885 struct extent_buffer *eb = path->nodes[level];
4886 u64 parent = 0;
4887
4888 if (wc->stage == UPDATE_BACKREF) {
4889 BUG_ON(wc->shared_level < level);
4890 if (level < wc->shared_level)
4891 goto out;
4892
4893 BUG_ON(wc->refs[level] <= 1);
4894 ret = find_next_key(path, level + 1, &wc->update_progress);
4895 if (ret > 0)
4896 wc->update_ref = 0;
4897
4898 wc->stage = DROP_REFERENCE;
4899 wc->shared_level = -1;
4900 path->slots[level] = 0;
4901
4902 /*
4903 * check reference count again if the block isn't locked.
4904 * we should start walking down the tree again if the
4905 * reference count is one.
4906 */
4907 if (!path->locks[level]) {
4908 BUG_ON(level == 0);
4909 btrfs_tree_lock(eb);
4910 btrfs_set_lock_blocking(eb);
4911 path->locks[level] = 1;
4912
4913 ret = btrfs_lookup_extent_info(trans, root,
4914 eb->start, eb->len,
4915 &wc->refs[level],
4916 &wc->flags[level]);
4917 BUG_ON(ret);
4918 BUG_ON(wc->refs[level] == 0);
4919 if (wc->refs[level] == 1) {
4920 btrfs_tree_unlock(eb);
4921 path->locks[level] = 0;
4922 return 1;
4923 }
4924 } else {
4925 BUG_ON(level != 0);
4926 }
4927 }
4928
4929 /* wc->stage == DROP_REFERENCE */
4930 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
4931
4932 if (wc->refs[level] == 1) {
4933 if (level == 0) {
4934 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4935 ret = btrfs_dec_ref(trans, root, eb, 1);
4936 else
4937 ret = btrfs_dec_ref(trans, root, eb, 0);
4938 BUG_ON(ret);
4939 }
4940 /* make block locked assertion in clean_tree_block happy */
4941 if (!path->locks[level] &&
4942 btrfs_header_generation(eb) == trans->transid) {
4943 btrfs_tree_lock(eb);
4944 btrfs_set_lock_blocking(eb);
4945 path->locks[level] = 1;
4946 }
4947 clean_tree_block(trans, root, eb);
4948 }
4949
4950 if (eb == root->node) {
4951 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4952 parent = eb->start;
4953 else
4954 BUG_ON(root->root_key.objectid !=
4955 btrfs_header_owner(eb));
4956 } else {
4957 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
4958 parent = path->nodes[level + 1]->start;
4959 else
4960 BUG_ON(root->root_key.objectid !=
4961 btrfs_header_owner(path->nodes[level + 1]));
4962 }
4963
4964 ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
4965 root->root_key.objectid, level, 0);
4966 BUG_ON(ret);
4967 out:
4968 wc->refs[level] = 0;
4969 wc->flags[level] = 0;
4970 return ret;
4971 }
4972
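/*
 * walk down the tree from wc->level, letting walk_down_proc decide at
 * each block whether to descend.  child blocks are read and locked as
 * we go; the walk stops at a leaf or at a shared block.
 */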
4973 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
4974 struct btrfs_root *root,
4975 struct btrfs_path *path,
4976 struct walk_control *wc)
4977 {
4978 struct extent_buffer *next;
4979 struct extent_buffer *cur;
4980 u64 bytenr;
4981 u64 ptr_gen;
4982 u32 blocksize;
4983 int level = wc->level;
4984 int ret;
4985
4986 while (level >= 0) {
4987 cur = path->nodes[level];
4988 BUG_ON(path->slots[level] >= btrfs_header_nritems(cur));
4989
4990 ret = walk_down_proc(trans, root, path, wc);
4991 if (ret > 0)
4992 break;
4993
4994 if (level == 0)
4995 break;
4996
4997 bytenr = btrfs_node_blockptr(cur, path->slots[level]);
4998 blocksize = btrfs_level_size(root, level - 1);
4999 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[level]);
5000
5001 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
5002 btrfs_tree_lock(next);
5003 btrfs_set_lock_blocking(next);
5004
5005 level--;
5006 BUG_ON(level != btrfs_header_level(next));
5007 path->nodes[level] = next;
5008 path->slots[level] = 0;
5009 path->locks[level] = 1;
5010 wc->level = level;
5011 }
5012 return 0;
5013 }
5014
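/*
 * walk back up the tree.  if a node still has unprocessed slots we
 * advance to the next slot and return 0 so the caller walks down
 * again; otherwise walk_up_proc drops our reference on the block and
 * we release it and go up a level.  returns 1 once the whole tree
 * has been processed.
 */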
5015 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5016 struct btrfs_root *root,
5017 struct btrfs_path *path,
5018 struct walk_control *wc, int max_level)
5019 {
5020 int level = wc->level;
5021 int ret;
5022
5023 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5024 while (level < max_level && path->nodes[level]) {
5025 wc->level = level;
5026 if (path->slots[level] + 1 <
5027 btrfs_header_nritems(path->nodes[level])) {
5028 path->slots[level]++;
5029 return 0;
5030 } else {
5031 ret = walk_up_proc(trans, root, path, wc);
5032 if (ret > 0)
5033 return 0;
5034
5035 if (path->locks[level]) {
5036 btrfs_tree_unlock(path->nodes[level]);
5037 path->locks[level] = 0;
5038 }
5039 free_extent_buffer(path->nodes[level]);
5040 path->nodes[level] = NULL;
5041 level++;
5042 }
5043 }
5044 return 1;
5045 }
5046
5047 /*
5048 * drop a subvolume tree.
5049 *
5050 * this function traverses the tree freeing any blocks that are only
5051 * referenced by the tree.
5052 *
5053 * when a shared tree block is found, this function decreases its
5054 * reference count by one. if update_ref is true, this function
5055 * also makes sure backrefs for the shared block and all lower level
5056 * blocks are properly updated.
5057 */
5058 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5059 {
5060 struct btrfs_path *path;
5061 struct btrfs_trans_handle *trans;
5062 struct btrfs_root *tree_root = root->fs_info->tree_root;
5063 struct btrfs_root_item *root_item = &root->root_item;
5064 struct walk_control *wc;
5065 struct btrfs_key key;
5066 int err = 0;
5067 int ret;
5068 int level;
5069
5070 path = btrfs_alloc_path();
5071 BUG_ON(!path);
5072
5073 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5074 BUG_ON(!wc);
5075
5076 trans = btrfs_start_transaction(tree_root, 1);
5077
5078 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5079 level = btrfs_header_level(root->node);
5080 path->nodes[level] = btrfs_lock_root_node(root);
5081 btrfs_set_lock_blocking(path->nodes[level]);
5082 path->slots[level] = 0;
5083 path->locks[level] = 1;
5084 memset(&wc->update_progress, 0,
5085 sizeof(wc->update_progress));
5086 } else {
5087 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5088 memcpy(&wc->update_progress, &key,
5089 sizeof(wc->update_progress));
5090
5091 level = root_item->drop_level;
5092 BUG_ON(level == 0);
5093 path->lowest_level = level;
5094 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5095 path->lowest_level = 0;
5096 if (ret < 0) {
5097 err = ret;
5098 goto out;
5099 }
5100 btrfs_node_key_to_cpu(path->nodes[level], &key,
5101 path->slots[level]);
5102 WARN_ON(memcmp(&key, &wc->update_progress, sizeof(key)));
5103
5104 /*
5105 * unlock our path, this is safe because only this
5106 * function is allowed to delete this snapshot
5107 */
5108 btrfs_unlock_up_safe(path, 0);
5109
5110 level = btrfs_header_level(root->node);
5111 while (1) {
5112 btrfs_tree_lock(path->nodes[level]);
5113 btrfs_set_lock_blocking(path->nodes[level]);
5114
5115 ret = btrfs_lookup_extent_info(trans, root,
5116 path->nodes[level]->start,
5117 path->nodes[level]->len,
5118 &wc->refs[level],
5119 &wc->flags[level]);
5120 BUG_ON(ret);
5121 BUG_ON(wc->refs[level] == 0);
5122
5123 if (level == root_item->drop_level)
5124 break;
5125
5126 btrfs_tree_unlock(path->nodes[level]);
5127 WARN_ON(wc->refs[level] != 1);
5128 level--;
5129 }
5130 }
5131
5132 wc->level = level;
5133 wc->shared_level = -1;
5134 wc->stage = DROP_REFERENCE;
5135 wc->update_ref = update_ref;
5136 wc->keep_locks = 0;
5137
5138 while (1) {
5139 ret = walk_down_tree(trans, root, path, wc);
5140 if (ret < 0) {
5141 err = ret;
5142 break;
5143 }
5144
5145 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5146 if (ret < 0) {
5147 err = ret;
5148 break;
5149 }
5150
5151 if (ret > 0) {
5152 BUG_ON(wc->stage != DROP_REFERENCE);
5153 break;
5154 }
5155
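/*
 * record how far we got: the key of the next pending node is saved
 * in drop_progress so an interrupted drop can resume from here after
 * a transaction commit or a crash
 */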
5156 if (wc->stage == DROP_REFERENCE) {
5157 level = wc->level;
5158 btrfs_node_key(path->nodes[level],
5159 &root_item->drop_progress,
5160 path->slots[level]);
5161 root_item->drop_level = level;
5162 }
5163
5164 BUG_ON(wc->level == 0);
5165 if (trans->transaction->in_commit ||
5166 trans->transaction->delayed_refs.flushing) {
5167 ret = btrfs_update_root(trans, tree_root,
5168 &root->root_key,
5169 root_item);
5170 BUG_ON(ret);
5171
5172 btrfs_end_transaction(trans, tree_root);
5173 trans = btrfs_start_transaction(tree_root, 1);
5174 } else {
5175 unsigned long update;
5176 update = trans->delayed_ref_updates;
5177 trans->delayed_ref_updates = 0;
5178 if (update)
5179 btrfs_run_delayed_refs(trans, tree_root,
5180 update);
5181 }
5182 }
5183 btrfs_release_path(root, path);
5184 BUG_ON(err);
5185
5186 ret = btrfs_del_root(trans, tree_root, &root->root_key);
5187 BUG_ON(ret);
5188
5189 free_extent_buffer(root->node);
5190 free_extent_buffer(root->commit_root);
5191 kfree(root);
5192 out:
5193 btrfs_end_transaction(trans, tree_root);
5194 kfree(wc);
5195 btrfs_free_path(path);
5196 return err;
5197 }
5198
5199 /*
5200 * drop subtree rooted at tree block 'node'.
5201 *
5202 * NOTE: this function will unlock and release tree block 'node'
5203 */
5204 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5205 struct btrfs_root *root,
5206 struct extent_buffer *node,
5207 struct extent_buffer *parent)
5208 {
5209 struct btrfs_path *path;
5210 struct walk_control *wc;
5211 int level;
5212 int parent_level;
5213 int ret = 0;
5214 int wret;
5215
5216 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5217
5218 path = btrfs_alloc_path();
5219 BUG_ON(!path);
5220
5221 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5222 BUG_ON(!wc);
5223
5224 btrfs_assert_tree_locked(parent);
5225 parent_level = btrfs_header_level(parent);
5226 extent_buffer_get(parent);
5227 path->nodes[parent_level] = parent;
5228 path->slots[parent_level] = btrfs_header_nritems(parent);
5229
5230 btrfs_assert_tree_locked(node);
5231 level = btrfs_header_level(node);
5232 path->nodes[level] = node;
5233 path->slots[level] = 0;
5234 path->locks[level] = 1;
5235
5236 wc->refs[parent_level] = 1;
5237 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5238 wc->level = level;
5239 wc->shared_level = -1;
5240 wc->stage = DROP_REFERENCE;
5241 wc->update_ref = 0;
5242 wc->keep_locks = 1;
5243
5244 while (1) {
5245 wret = walk_down_tree(trans, root, path, wc);
5246 if (wret < 0) {
5247 ret = wret;
5248 break;
5249 }
5250
5251 wret = walk_up_tree(trans, root, path, wc, parent_level);
5252 if (wret < 0)
5253 ret = wret;
5254 if (wret != 0)
5255 break;
5256 }
5257
5258 kfree(wc);
5259 btrfs_free_path(path);
5260 return ret;
5261 }
5262
5263 #if 0
5264 static unsigned long calc_ra(unsigned long start, unsigned long last,
5265 unsigned long nr)
5266 {
5267 return min(last, start + nr - 1);
5268 }
5269
5270 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5271 u64 len)
5272 {
5273 u64 page_start;
5274 u64 page_end;
5275 unsigned long first_index;
5276 unsigned long last_index;
5277 unsigned long i;
5278 struct page *page;
5279 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5280 struct file_ra_state *ra;
5281 struct btrfs_ordered_extent *ordered;
5282 unsigned int total_read = 0;
5283 unsigned int total_dirty = 0;
5284 int ret = 0;
5285
5286 ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
5287
5288 mutex_lock(&inode->i_mutex);
5289 first_index = start >> PAGE_CACHE_SHIFT;
5290 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5291
5292 /* make sure the dirty trick played by the caller works */
5293 ret = invalidate_inode_pages2_range(inode->i_mapping,
5294 first_index, last_index);
5295 if (ret)
5296 goto out_unlock;
5297
5298 file_ra_state_init(ra, inode->i_mapping);
5299
5300 for (i = first_index ; i <= last_index; i++) {
5301 if (total_read % ra->ra_pages == 0) {
5302 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5303 calc_ra(i, last_index, ra->ra_pages));
5304 }
5305 total_read++;
5306 again:
5307 BUG_ON(((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode));
5309 page = grab_cache_page(inode->i_mapping, i);
5310 if (!page) {
5311 ret = -ENOMEM;
5312 goto out_unlock;
5313 }
5314 if (!PageUptodate(page)) {
5315 btrfs_readpage(NULL, page);
5316 lock_page(page);
5317 if (!PageUptodate(page)) {
5318 unlock_page(page);
5319 page_cache_release(page);
5320 ret = -EIO;
5321 goto out_unlock;
5322 }
5323 }
5324 wait_on_page_writeback(page);
5325
5326 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5327 page_end = page_start + PAGE_CACHE_SIZE - 1;
5328 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5329
5330 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5331 if (ordered) {
5332 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5333 unlock_page(page);
5334 page_cache_release(page);
5335 btrfs_start_ordered_extent(inode, ordered, 1);
5336 btrfs_put_ordered_extent(ordered);
5337 goto again;
5338 }
5339 set_page_extent_mapped(page);
5340
5341 if (i == first_index)
5342 set_extent_bits(io_tree, page_start, page_end,
5343 EXTENT_BOUNDARY, GFP_NOFS);
5344 btrfs_set_extent_delalloc(inode, page_start, page_end);
5345
5346 set_page_dirty(page);
5347 total_dirty++;
5348
5349 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5350 unlock_page(page);
5351 page_cache_release(page);
5352 }
5353
5354 out_unlock:
5355 kfree(ra);
5356 mutex_unlock(&inode->i_mutex);
5357 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5358 return ret;
5359 }
5360
5361 static noinline int relocate_data_extent(struct inode *reloc_inode,
5362 struct btrfs_key *extent_key,
5363 u64 offset)
5364 {
5365 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5366 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5367 struct extent_map *em;
5368 u64 start = extent_key->objectid - offset;
5369 u64 end = start + extent_key->offset - 1;
5370
5371 em = alloc_extent_map(GFP_NOFS);
5372 BUG_ON(!em || IS_ERR(em));
5373
5374 em->start = start;
5375 em->len = extent_key->offset;
5376 em->block_len = extent_key->offset;
5377 em->block_start = extent_key->objectid;
5378 em->bdev = root->fs_info->fs_devices->latest_bdev;
5379 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5380
5381 /* set up an extent map to cheat btrfs_readpage */
5382 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5383 while (1) {
5384 int ret;
5385 spin_lock(&em_tree->lock);
5386 ret = add_extent_mapping(em_tree, em);
5387 spin_unlock(&em_tree->lock);
5388 if (ret != -EEXIST) {
5389 free_extent_map(em);
5390 break;
5391 }
5392 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5393 }
5394 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5395
5396 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
5397 }
5398
5399 struct btrfs_ref_path {
5400 u64 extent_start;
5401 u64 nodes[BTRFS_MAX_LEVEL];
5402 u64 root_objectid;
5403 u64 root_generation;
5404 u64 owner_objectid;
5405 u32 num_refs;
5406 int lowest_level;
5407 int current_level;
5408 int shared_level;
5409
5410 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5411 u64 new_nodes[BTRFS_MAX_LEVEL];
5412 };
5413
5414 struct disk_extent {
5415 u64 ram_bytes;
5416 u64 disk_bytenr;
5417 u64 disk_num_bytes;
5418 u64 offset;
5419 u64 num_bytes;
5420 u8 compression;
5421 u8 encryption;
5422 u16 other_encoding;
5423 };
5424
5425 static int is_cowonly_root(u64 root_objectid)
5426 {
5427 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5428 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5429 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5430 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5431 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5432 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
5433 return 1;
5434 return 0;
5435 }
5436
5437 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5438 struct btrfs_root *extent_root,
5439 struct btrfs_ref_path *ref_path,
5440 int first_time)
5441 {
5442 struct extent_buffer *leaf;
5443 struct btrfs_path *path;
5444 struct btrfs_extent_ref *ref;
5445 struct btrfs_key key;
5446 struct btrfs_key found_key;
5447 u64 bytenr;
5448 u32 nritems;
5449 int level;
5450 int ret = 1;
5451
5452 path = btrfs_alloc_path();
5453 if (!path)
5454 return -ENOMEM;
5455
5456 if (first_time) {
5457 ref_path->lowest_level = -1;
5458 ref_path->current_level = -1;
5459 ref_path->shared_level = -1;
5460 goto walk_up;
5461 }
5462 walk_down:
5463 level = ref_path->current_level - 1;
5464 while (level >= -1) {
5465 u64 parent;
5466 if (level < ref_path->lowest_level)
5467 break;
5468
5469 if (level >= 0)
5470 bytenr = ref_path->nodes[level];
5471 else
5472 bytenr = ref_path->extent_start;
5473 BUG_ON(bytenr == 0);
5474
5475 parent = ref_path->nodes[level + 1];
5476 ref_path->nodes[level + 1] = 0;
5477 ref_path->current_level = level;
5478 BUG_ON(parent == 0);
5479
5480 key.objectid = bytenr;
5481 key.offset = parent + 1;
5482 key.type = BTRFS_EXTENT_REF_KEY;
5483
5484 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5485 if (ret < 0)
5486 goto out;
5487 BUG_ON(ret == 0);
5488
5489 leaf = path->nodes[0];
5490 nritems = btrfs_header_nritems(leaf);
5491 if (path->slots[0] >= nritems) {
5492 ret = btrfs_next_leaf(extent_root, path);
5493 if (ret < 0)
5494 goto out;
5495 if (ret > 0)
5496 goto next;
5497 leaf = path->nodes[0];
5498 }
5499
5500 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5501 if (found_key.objectid == bytenr &&
5502 found_key.type == BTRFS_EXTENT_REF_KEY) {
5503 if (level < ref_path->shared_level)
5504 ref_path->shared_level = level;
5505 goto found;
5506 }
5507 next:
5508 level--;
5509 btrfs_release_path(extent_root, path);
5510 cond_resched();
5511 }
5512 /* reached lowest level */
5513 ret = 1;
5514 goto out;
5515 walk_up:
5516 level = ref_path->current_level;
5517 while (level < BTRFS_MAX_LEVEL - 1) {
5518 u64 ref_objectid;
5519
5520 if (level >= 0)
5521 bytenr = ref_path->nodes[level];
5522 else
5523 bytenr = ref_path->extent_start;
5524
5525 BUG_ON(bytenr == 0);
5526
5527 key.objectid = bytenr;
5528 key.offset = 0;
5529 key.type = BTRFS_EXTENT_REF_KEY;
5530
5531 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5532 if (ret < 0)
5533 goto out;
5534
5535 leaf = path->nodes[0];
5536 nritems = btrfs_header_nritems(leaf);
5537 if (path->slots[0] >= nritems) {
5538 ret = btrfs_next_leaf(extent_root, path);
5539 if (ret < 0)
5540 goto out;
5541 if (ret > 0) {
5542 /* the extent was freed by someone */
5543 if (ref_path->lowest_level == level)
5544 goto out;
5545 btrfs_release_path(extent_root, path);
5546 goto walk_down;
5547 }
5548 leaf = path->nodes[0];
5549 }
5550
5551 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5552 if (found_key.objectid != bytenr ||
5553 found_key.type != BTRFS_EXTENT_REF_KEY) {
5554 /* the extent was freed by someone */
5555 if (ref_path->lowest_level == level) {
5556 ret = 1;
5557 goto out;
5558 }
5559 btrfs_release_path(extent_root, path);
5560 goto walk_down;
5561 }
5562 found:
5563 ref = btrfs_item_ptr(leaf, path->slots[0],
5564 struct btrfs_extent_ref);
5565 ref_objectid = btrfs_ref_objectid(leaf, ref);
5566 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5567 if (first_time) {
5568 level = (int)ref_objectid;
5569 BUG_ON(level >= BTRFS_MAX_LEVEL);
5570 ref_path->lowest_level = level;
5571 ref_path->current_level = level;
5572 ref_path->nodes[level] = bytenr;
5573 } else {
5574 WARN_ON(ref_objectid != level);
5575 }
5576 } else {
5577 WARN_ON(level != -1);
5578 }
5579 first_time = 0;
5580
5581 if (ref_path->lowest_level == level) {
5582 ref_path->owner_objectid = ref_objectid;
5583 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
5584 }
5585
5586 /*
5587 * the block is a tree root or the block isn't in a reference
5588 * counted tree.
5589 */
5590 if (found_key.objectid == found_key.offset ||
5591 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
5592 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5593 ref_path->root_generation =
5594 btrfs_ref_generation(leaf, ref);
5595 if (level < 0) {
5596 /* special reference from the tree log */
5597 ref_path->nodes[0] = found_key.offset;
5598 ref_path->current_level = 0;
5599 }
5600 ret = 0;
5601 goto out;
5602 }
5603
5604 level++;
5605 BUG_ON(ref_path->nodes[level] != 0);
5606 ref_path->nodes[level] = found_key.offset;
5607 ref_path->current_level = level;
5608
5609 /*
5610 * the reference was created in the running transaction,
5611 * no need to continue walking up.
5612 */
5613 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
5614 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5615 ref_path->root_generation =
5616 btrfs_ref_generation(leaf, ref);
5617 ret = 0;
5618 goto out;
5619 }
5620
5621 btrfs_release_path(extent_root, path);
5622 cond_resched();
5623 }
5624 /* reached max tree level, but no tree root found. */
5625 BUG();
5626 out:
5627 btrfs_free_path(path);
5628 return ret;
5629 }
5630
5631 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
5632 struct btrfs_root *extent_root,
5633 struct btrfs_ref_path *ref_path,
5634 u64 extent_start)
5635 {
5636 memset(ref_path, 0, sizeof(*ref_path));
5637 ref_path->extent_start = extent_start;
5638
5639 return __next_ref_path(trans, extent_root, ref_path, 1);
5640 }
5641
5642 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
5643 struct btrfs_root *extent_root,
5644 struct btrfs_ref_path *ref_path)
5645 {
5646 return __next_ref_path(trans, extent_root, ref_path, 0);
5647 }
5648
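/*
 * collect the new file extents covering the relocated range.  starting
 * at extent_key->objectid - offset in reloc_inode, gather consecutive
 * BTRFS_FILE_EXTENT_REG items into *extents until the whole old extent
 * is covered.  with no_fragment set, needing more than one extent
 * makes this return 1.
 */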
5649 static noinline int get_new_locations(struct inode *reloc_inode,
5650 struct btrfs_key *extent_key,
5651 u64 offset, int no_fragment,
5652 struct disk_extent **extents,
5653 int *nr_extents)
5654 {
5655 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5656 struct btrfs_path *path;
5657 struct btrfs_file_extent_item *fi;
5658 struct extent_buffer *leaf;
5659 struct disk_extent *exts = *extents;
5660 struct btrfs_key found_key;
5661 u64 cur_pos;
5662 u64 last_byte;
5663 u32 nritems;
5664 int nr = 0;
5665 int max = *nr_extents;
5666 int ret;
5667
5668 WARN_ON(!no_fragment && *extents);
5669 if (!exts) {
5670 max = 1;
5671 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
5672 if (!exts)
5673 return -ENOMEM;
5674 }
5675
5676 path = btrfs_alloc_path();
5677 BUG_ON(!path);
5678
5679 cur_pos = extent_key->objectid - offset;
5680 last_byte = extent_key->objectid + extent_key->offset;
5681 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
5682 cur_pos, 0);
5683 if (ret < 0)
5684 goto out;
5685 if (ret > 0) {
5686 ret = -ENOENT;
5687 goto out;
5688 }
5689
5690 while (1) {
5691 leaf = path->nodes[0];
5692 nritems = btrfs_header_nritems(leaf);
5693 if (path->slots[0] >= nritems) {
5694 ret = btrfs_next_leaf(root, path);
5695 if (ret < 0)
5696 goto out;
5697 if (ret > 0)
5698 break;
5699 leaf = path->nodes[0];
5700 }
5701
5702 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5703 if (found_key.offset != cur_pos ||
5704 found_key.type != BTRFS_EXTENT_DATA_KEY ||
5705 found_key.objectid != reloc_inode->i_ino)
5706 break;
5707
5708 fi = btrfs_item_ptr(leaf, path->slots[0],
5709 struct btrfs_file_extent_item);
5710 if (btrfs_file_extent_type(leaf, fi) !=
5711 BTRFS_FILE_EXTENT_REG ||
5712 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5713 break;
5714
5715 if (nr == max) {
5716 struct disk_extent *old = exts;
5717 max *= 2;
5718 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
if (!exts) {
exts = old;
ret = -ENOMEM;
goto out;
}
5719 memcpy(exts, old, sizeof(*exts) * nr);
5720 if (old != *extents)
5721 kfree(old);
5722 }
5723
5724 exts[nr].disk_bytenr =
5725 btrfs_file_extent_disk_bytenr(leaf, fi);
5726 exts[nr].disk_num_bytes =
5727 btrfs_file_extent_disk_num_bytes(leaf, fi);
5728 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
5729 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5730 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
5731 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
5732 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
5733 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
5734 fi);
5735 BUG_ON(exts[nr].offset > 0);
5736 BUG_ON(exts[nr].compression || exts[nr].encryption);
5737 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
5738
5739 cur_pos += exts[nr].num_bytes;
5740 nr++;
5741
5742 if (cur_pos + offset >= last_byte)
5743 break;
5744
5745 if (no_fragment) {
5746 ret = 1;
5747 goto out;
5748 }
5749 path->slots[0]++;
5750 }
5751
5752 BUG_ON(cur_pos + offset > last_byte);
5753 if (cur_pos + offset < last_byte) {
5754 ret = -ENOENT;
5755 goto out;
5756 }
5757 ret = 0;
5758 out:
5759 btrfs_free_path(path);
5760 if (ret) {
5761 if (exts != *extents)
5762 kfree(exts);
5763 } else {
5764 *extents = exts;
5765 *nr_extents = nr;
5766 }
5767 return ret;
5768 }
5769
5770 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
5771 struct btrfs_root *root,
5772 struct btrfs_path *path,
5773 struct btrfs_key *extent_key,
5774 struct btrfs_key *leaf_key,
5775 struct btrfs_ref_path *ref_path,
5776 struct disk_extent *new_extents,
5777 int nr_extents)
5778 {
5779 struct extent_buffer *leaf;
5780 struct btrfs_file_extent_item *fi;
5781 struct inode *inode = NULL;
5782 struct btrfs_key key;
5783 u64 lock_start = 0;
5784 u64 lock_end = 0;
5785 u64 num_bytes;
5786 u64 ext_offset;
5787 u64 search_end = (u64)-1;
5788 u32 nritems;
5789 int nr_scaned = 0;
5790 int extent_locked = 0;
5791 int extent_type;
5792 int ret;
5793
5794 memcpy(&key, leaf_key, sizeof(key));
5795 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5796 if (key.objectid < ref_path->owner_objectid ||
5797 (key.objectid == ref_path->owner_objectid &&
5798 key.type < BTRFS_EXTENT_DATA_KEY)) {
5799 key.objectid = ref_path->owner_objectid;
5800 key.type = BTRFS_EXTENT_DATA_KEY;
5801 key.offset = 0;
5802 }
5803 }
5804
5805 while (1) {
5806 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
5807 if (ret < 0)
5808 goto out;
5809
5810 leaf = path->nodes[0];
5811 nritems = btrfs_header_nritems(leaf);
5812 next:
5813 if (extent_locked && ret > 0) {
5814 /*
5815 * the file extent item was modified by someone
5816 * before the extent got locked.
5817 */
5818 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5819 lock_end, GFP_NOFS);
5820 extent_locked = 0;
5821 }
5822
5823 if (path->slots[0] >= nritems) {
5824 if (++nr_scanned > 2)
5825 break;
5826
5827 BUG_ON(extent_locked);
5828 ret = btrfs_next_leaf(root, path);
5829 if (ret < 0)
5830 goto out;
5831 if (ret > 0)
5832 break;
5833 leaf = path->nodes[0];
5834 nritems = btrfs_header_nritems(leaf);
5835 }
5836
5837 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5838
5839 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
5840 if ((key.objectid > ref_path->owner_objectid) ||
5841 (key.objectid == ref_path->owner_objectid &&
5842 key.type > BTRFS_EXTENT_DATA_KEY) ||
5843 key.offset >= search_end)
5844 break;
5845 }
5846
5847 if (inode && key.objectid != inode->i_ino) {
5848 BUG_ON(extent_locked);
5849 btrfs_release_path(root, path);
5850 mutex_unlock(&inode->i_mutex);
5851 iput(inode);
5852 inode = NULL;
5853 continue;
5854 }
5855
5856 if (key.type != BTRFS_EXTENT_DATA_KEY) {
5857 path->slots[0]++;
5858 ret = 1;
5859 goto next;
5860 }
5861 fi = btrfs_item_ptr(leaf, path->slots[0],
5862 struct btrfs_file_extent_item);
5863 extent_type = btrfs_file_extent_type(leaf, fi);
5864 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
5865 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
5866 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
5867 extent_key->objectid)) {
5868 path->slots[0]++;
5869 ret = 1;
5870 goto next;
5871 }
5872
5873 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5874 ext_offset = btrfs_file_extent_offset(leaf, fi);
5875
5876 if (search_end == (u64)-1) {
5877 search_end = key.offset - ext_offset +
5878 btrfs_file_extent_ram_bytes(leaf, fi);
5879 }
5880
5881 if (!extent_locked) {
5882 lock_start = key.offset;
5883 lock_end = lock_start + num_bytes - 1;
5884 } else {
5885 if (lock_start > key.offset ||
5886 lock_end + 1 < key.offset + num_bytes) {
5887 unlock_extent(&BTRFS_I(inode)->io_tree,
5888 lock_start, lock_end, GFP_NOFS);
5889 extent_locked = 0;
5890 }
5891 }
5892
5893 if (!inode) {
5894 btrfs_release_path(root, path);
5895
5896 inode = btrfs_iget_locked(root->fs_info->sb,
5897 key.objectid, root);
5898 if (inode->i_state & I_NEW) {
5899 BTRFS_I(inode)->root = root;
5900 BTRFS_I(inode)->location.objectid =
5901 key.objectid;
5902 BTRFS_I(inode)->location.type =
5903 BTRFS_INODE_ITEM_KEY;
5904 BTRFS_I(inode)->location.offset = 0;
5905 btrfs_read_locked_inode(inode);
5906 unlock_new_inode(inode);
5907 }
5908 /*
5909 * some code calls btrfs_commit_transaction while
5910 * holding the i_mutex, so we can't use mutex_lock
5911 * here.
5912 */
5913 if (is_bad_inode(inode) ||
5914 !mutex_trylock(&inode->i_mutex)) {
5915 iput(inode);
5916 inode = NULL;
5917 key.offset = (u64)-1;
5918 goto skip;
5919 }
5920 }
5921
5922 if (!extent_locked) {
5923 struct btrfs_ordered_extent *ordered;
5924
5925 btrfs_release_path(root, path);
5926
5927 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5928 lock_end, GFP_NOFS);
5929 ordered = btrfs_lookup_first_ordered_extent(inode,
5930 lock_end);
5931 if (ordered &&
5932 ordered->file_offset <= lock_end &&
5933 ordered->file_offset + ordered->len > lock_start) {
5934 unlock_extent(&BTRFS_I(inode)->io_tree,
5935 lock_start, lock_end, GFP_NOFS);
5936 btrfs_start_ordered_extent(inode, ordered, 1);
5937 btrfs_put_ordered_extent(ordered);
5938 key.offset += num_bytes;
5939 goto skip;
5940 }
5941 if (ordered)
5942 btrfs_put_ordered_extent(ordered);
5943
5944 extent_locked = 1;
5945 continue;
5946 }
5947
5948 if (nr_extents == 1) {
5949 /* update extent pointer in place */
5950 btrfs_set_file_extent_disk_bytenr(leaf, fi,
5951 new_extents[0].disk_bytenr);
5952 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5953 new_extents[0].disk_num_bytes);
5954 btrfs_mark_buffer_dirty(leaf);
5955
5956 btrfs_drop_extent_cache(inode, key.offset,
5957 key.offset + num_bytes - 1, 0);
5958
5959 ret = btrfs_inc_extent_ref(trans, root,
5960 new_extents[0].disk_bytenr,
5961 new_extents[0].disk_num_bytes,
5962 leaf->start,
5963 root->root_key.objectid,
5964 trans->transid,
5965 key.objectid);
5966 BUG_ON(ret);
5967
5968 ret = btrfs_free_extent(trans, root,
5969 extent_key->objectid,
5970 extent_key->offset,
5971 leaf->start,
5972 btrfs_header_owner(leaf),
5973 btrfs_header_generation(leaf),
5974 key.objectid, 0);
5975 BUG_ON(ret);
5976
5977 btrfs_release_path(root, path);
5978 key.offset += num_bytes;
5979 } else {
5980 BUG_ON(1);
5981 #if 0
5982 u64 alloc_hint;
5983 u64 extent_len;
5984 int i;
5985 /*
5986 * drop the old extent pointer first, then insert the
5987 * new pointers one by one
5988 */
5989 btrfs_release_path(root, path);
5990 ret = btrfs_drop_extents(trans, root, inode, key.offset,
5991 key.offset + num_bytes,
5992 key.offset, &alloc_hint);
5993 BUG_ON(ret);
5994
5995 for (i = 0; i < nr_extents; i++) {
5996 if (ext_offset >= new_extents[i].num_bytes) {
5997 ext_offset -= new_extents[i].num_bytes;
5998 continue;
5999 }
6000 extent_len = min(new_extents[i].num_bytes -
6001 ext_offset, num_bytes);
6002
6003 ret = btrfs_insert_empty_item(trans, root,
6004 path, &key,
6005 sizeof(*fi));
6006 BUG_ON(ret);
6007
6008 leaf = path->nodes[0];
6009 fi = btrfs_item_ptr(leaf, path->slots[0],
6010 struct btrfs_file_extent_item);
6011 btrfs_set_file_extent_generation(leaf, fi,
6012 trans->transid);
6013 btrfs_set_file_extent_type(leaf, fi,
6014 BTRFS_FILE_EXTENT_REG);
6015 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6016 new_extents[i].disk_bytenr);
6017 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6018 new_extents[i].disk_num_bytes);
6019 btrfs_set_file_extent_ram_bytes(leaf, fi,
6020 new_extents[i].ram_bytes);
6021
6022 btrfs_set_file_extent_compression(leaf, fi,
6023 new_extents[i].compression);
6024 btrfs_set_file_extent_encryption(leaf, fi,
6025 new_extents[i].encryption);
6026 btrfs_set_file_extent_other_encoding(leaf, fi,
6027 new_extents[i].other_encoding);
6028
6029 btrfs_set_file_extent_num_bytes(leaf, fi,
6030 extent_len);
6031 ext_offset += new_extents[i].offset;
6032 btrfs_set_file_extent_offset(leaf, fi,
6033 ext_offset);
6034 btrfs_mark_buffer_dirty(leaf);
6035
6036 btrfs_drop_extent_cache(inode, key.offset,
6037 key.offset + extent_len - 1, 0);
6038
6039 ret = btrfs_inc_extent_ref(trans, root,
6040 new_extents[i].disk_bytenr,
6041 new_extents[i].disk_num_bytes,
6042 leaf->start,
6043 root->root_key.objectid,
6044 trans->transid, key.objectid);
6045 BUG_ON(ret);
6046 btrfs_release_path(root, path);
6047
6048 inode_add_bytes(inode, extent_len);
6049
6050 ext_offset = 0;
6051 num_bytes -= extent_len;
6052 key.offset += extent_len;
6053
6054 if (num_bytes == 0)
6055 break;
6056 }
6057 BUG_ON(i >= nr_extents);
6058 #endif
6059 }
6060
6061 if (extent_locked) {
6062 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6063 lock_end, GFP_NOFS);
6064 extent_locked = 0;
6065 }
6066 skip:
6067 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
6068 key.offset >= search_end)
6069 break;
6070
6071 cond_resched();
6072 }
6073 ret = 0;
6074 out:
6075 btrfs_release_path(root, path);
6076 if (inode) {
6077 mutex_unlock(&inode->i_mutex);
6078 if (extent_locked) {
6079 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6080 lock_end, GFP_NOFS);
6081 }
6082 iput(inode);
6083 }
6084 return ret;
6085 }
6086
6087 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6088 struct btrfs_root *root,
6089 struct extent_buffer *buf, u64 orig_start)
6090 {
6091 int level;
6092 int ret;
6093
6094 BUG_ON(btrfs_header_generation(buf) != trans->transid);
6095 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6096
6097 level = btrfs_header_level(buf);
6098 if (level == 0) {
6099 struct btrfs_leaf_ref *ref;
6100 struct btrfs_leaf_ref *orig_ref;
6101
6102 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6103 if (!orig_ref)
6104 return -ENOENT;
6105
6106 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6107 if (!ref) {
6108 btrfs_free_leaf_ref(root, orig_ref);
6109 return -ENOMEM;
6110 }
6111
6112 ref->nritems = orig_ref->nritems;
6113 memcpy(ref->extents, orig_ref->extents,
6114 sizeof(ref->extents[0]) * ref->nritems);
6115
6116 btrfs_free_leaf_ref(root, orig_ref);
6117
6118 ref->root_gen = trans->transid;
6119 ref->bytenr = buf->start;
6120 ref->owner = btrfs_header_owner(buf);
6121 ref->generation = btrfs_header_generation(buf);
6122
6123 ret = btrfs_add_leaf_ref(root, ref, 0);
6124 WARN_ON(ret);
6125 btrfs_free_leaf_ref(root, ref);
6126 }
6127 return 0;
6128 }
6129
6130 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6131 struct extent_buffer *leaf,
6132 struct btrfs_block_group_cache *group,
6133 struct btrfs_root *target_root)
6134 {
6135 struct btrfs_key key;
6136 struct inode *inode = NULL;
6137 struct btrfs_file_extent_item *fi;
6138 u64 num_bytes;
6139 u64 skip_objectid = 0;
6140 u32 nritems;
6141 u32 i;
6142
6143 nritems = btrfs_header_nritems(leaf);
6144 for (i = 0; i < nritems; i++) {
6145 btrfs_item_key_to_cpu(leaf, &key, i);
6146 if (key.objectid == skip_objectid ||
6147 key.type != BTRFS_EXTENT_DATA_KEY)
6148 continue;
6149 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6150 if (btrfs_file_extent_type(leaf, fi) ==
6151 BTRFS_FILE_EXTENT_INLINE)
6152 continue;
6153 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6154 continue;
6155 if (!inode || inode->i_ino != key.objectid) {
6156 iput(inode);
6157 inode = btrfs_ilookup(target_root->fs_info->sb,
6158 key.objectid, target_root, 1);
6159 }
6160 if (!inode) {
6161 skip_objectid = key.objectid;
6162 continue;
6163 }
6164 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6165
6166 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6167 key.offset + num_bytes - 1, GFP_NOFS);
6168 btrfs_drop_extent_cache(inode, key.offset,
6169 key.offset + num_bytes - 1, 1);
6170 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6171 key.offset + num_bytes - 1, GFP_NOFS);
6172 cond_resched();
6173 }
6174 iput(inode);
6175 return 0;
6176 }
6177
6178 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6179 struct btrfs_root *root,
6180 struct extent_buffer *leaf,
6181 struct btrfs_block_group_cache *group,
6182 struct inode *reloc_inode)
6183 {
6184 struct btrfs_key key;
6185 struct btrfs_key extent_key;
6186 struct btrfs_file_extent_item *fi;
6187 struct btrfs_leaf_ref *ref;
6188 struct disk_extent *new_extent;
6189 u64 bytenr;
6190 u64 num_bytes;
6191 u32 nritems;
6192 u32 i;
6193 int ext_index;
6194 int nr_extent;
6195 int ret;
6196
6197 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6198 BUG_ON(!new_extent);
6199
6200 ref = btrfs_lookup_leaf_ref(root, leaf->start);
6201 BUG_ON(!ref);
6202
6203 ext_index = -1;
6204 nritems = btrfs_header_nritems(leaf);
6205 for (i = 0; i < nritems; i++) {
6206 btrfs_item_key_to_cpu(leaf, &key, i);
6207 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6208 continue;
6209 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6210 if (btrfs_file_extent_type(leaf, fi) ==
6211 BTRFS_FILE_EXTENT_INLINE)
6212 continue;
6213 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6214 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6215 if (bytenr == 0)
6216 continue;
6217
6218 ext_index++;
6219 if (bytenr >= group->key.objectid + group->key.offset ||
6220 bytenr + num_bytes <= group->key.objectid)
6221 continue;
6222
6223 extent_key.objectid = bytenr;
6224 extent_key.offset = num_bytes;
6225 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6226 nr_extent = 1;
6227 ret = get_new_locations(reloc_inode, &extent_key,
6228 group->key.objectid, 1,
6229 &new_extent, &nr_extent);
6230 if (ret > 0)
6231 continue;
6232 BUG_ON(ret < 0);
6233
6234 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6235 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6236 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6237 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6238
6239 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6240 new_extent->disk_bytenr);
6241 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6242 new_extent->disk_num_bytes);
6243 btrfs_mark_buffer_dirty(leaf);
6244
6245 ret = btrfs_inc_extent_ref(trans, root,
6246 new_extent->disk_bytenr,
6247 new_extent->disk_num_bytes,
6248 leaf->start,
6249 root->root_key.objectid,
6250 trans->transid, key.objectid);
6251 BUG_ON(ret);
6252
6253 ret = btrfs_free_extent(trans, root,
6254 bytenr, num_bytes, leaf->start,
6255 btrfs_header_owner(leaf),
6256 btrfs_header_generation(leaf),
6257 key.objectid, 0);
6258 BUG_ON(ret);
6259 cond_resched();
6260 }
6261 kfree(new_extent);
6262 BUG_ON(ext_index + 1 != ref->nritems);
6263 btrfs_free_leaf_ref(root, ref);
6264 return 0;
6265 }
6266
6267 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6268 struct btrfs_root *root)
6269 {
6270 struct btrfs_root *reloc_root;
6271 int ret;
6272
6273 if (root->reloc_root) {
6274 reloc_root = root->reloc_root;
6275 root->reloc_root = NULL;
6276 list_add(&reloc_root->dead_list,
6277 &root->fs_info->dead_reloc_roots);
6278
6279 btrfs_set_root_bytenr(&reloc_root->root_item,
6280 reloc_root->node->start);
6281 btrfs_set_root_level(&reloc_root->root_item,
6282 btrfs_header_level(reloc_root->node));
6283 memset(&reloc_root->root_item.drop_progress, 0,
6284 sizeof(struct btrfs_disk_key));
6285 reloc_root->root_item.drop_level = 0;
6286
6287 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6288 &reloc_root->root_key,
6289 &reloc_root->root_item);
6290 BUG_ON(ret);
6291 }
6292 return 0;
6293 }
6294
6295 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6296 {
6297 struct btrfs_trans_handle *trans;
6298 struct btrfs_root *reloc_root;
6299 struct btrfs_root *prev_root = NULL;
6300 struct list_head dead_roots;
6301 int ret;
6302 unsigned long nr;
6303
6304 INIT_LIST_HEAD(&dead_roots);
6305 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6306
6307 while (!list_empty(&dead_roots)) {
6308 reloc_root = list_entry(dead_roots.prev,
6309 struct btrfs_root, dead_list);
6310 list_del_init(&reloc_root->dead_list);
6311
6312 BUG_ON(reloc_root->commit_root != NULL);
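/*
 * drop the snapshot a little at a time: btrfs_drop_snapshot returns
 * -EAGAIN while there is more to do, and we end the transaction
 * between rounds so the dirty btree pages can be written back.
 */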
6313 while (1) {
6314 trans = btrfs_join_transaction(root, 1);
6315 BUG_ON(!trans);
6316
6317 mutex_lock(&root->fs_info->drop_mutex);
6318 ret = btrfs_drop_snapshot(trans, reloc_root);
6319 if (ret != -EAGAIN)
6320 break;
6321 mutex_unlock(&root->fs_info->drop_mutex);
6322
6323 nr = trans->blocks_used;
6324 ret = btrfs_end_transaction(trans, root);
6325 BUG_ON(ret);
6326 btrfs_btree_balance_dirty(root, nr);
6327 }
6328
6329 free_extent_buffer(reloc_root->node);
6330
6331 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6332 &reloc_root->root_key);
6333 BUG_ON(ret);
6334 mutex_unlock(&root->fs_info->drop_mutex);
6335
6336 nr = trans->blocks_used;
6337 ret = btrfs_end_transaction(trans, root);
6338 BUG_ON(ret);
6339 btrfs_btree_balance_dirty(root, nr);
6340
6341 kfree(prev_root);
6342 prev_root = reloc_root;
6343 }
6344 if (prev_root) {
6345 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6346 kfree(prev_root);
6347 }
6348 return 0;
6349 }
6350
6351 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6352 {
6353 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6354 return 0;
6355 }
6356
6357 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6358 {
6359 struct btrfs_root *reloc_root;
6360 struct btrfs_trans_handle *trans;
6361 struct btrfs_key location;
6362 int found;
6363 int ret;
6364
6365 mutex_lock(&root->fs_info->tree_reloc_mutex);
6366 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6367 BUG_ON(ret);
6368 found = !list_empty(&root->fs_info->dead_reloc_roots);
6369 mutex_unlock(&root->fs_info->tree_reloc_mutex);
6370
6371 if (found) {
6372 trans = btrfs_start_transaction(root, 1);
6373 BUG_ON(!trans);
6374 ret = btrfs_commit_transaction(trans, root);
6375 BUG_ON(ret);
6376 }
6377
6378 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6379 location.offset = (u64)-1;
6380 location.type = BTRFS_ROOT_ITEM_KEY;
6381
6382 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6383 BUG_ON(!reloc_root);
6384 btrfs_orphan_cleanup(reloc_root);
6385 return 0;
6386 }
6387
6388 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6389 struct btrfs_root *root)
6390 {
6391 struct btrfs_root *reloc_root;
6392 struct extent_buffer *eb;
6393 struct btrfs_root_item *root_item;
6394 struct btrfs_key root_key;
6395 int ret;
6396
6397 BUG_ON(!root->ref_cows);
6398 if (root->reloc_root)
6399 return 0;
6400
6401 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6402 BUG_ON(!root_item);
6403
6404 ret = btrfs_copy_root(trans, root, root->commit_root,
6405 &eb, BTRFS_TREE_RELOC_OBJECTID);
6406 BUG_ON(ret);
6407
6408 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6409 root_key.offset = root->root_key.objectid;
6410 root_key.type = BTRFS_ROOT_ITEM_KEY;
6411
6412 memcpy(root_item, &root->root_item, sizeof(*root_item));
6413 btrfs_set_root_refs(root_item, 0);
6414 btrfs_set_root_bytenr(root_item, eb->start);
6415 btrfs_set_root_level(root_item, btrfs_header_level(eb));
6416 btrfs_set_root_generation(root_item, trans->transid);
6417
6418 btrfs_tree_unlock(eb);
6419 free_extent_buffer(eb);
6420
6421 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6422 &root_key, root_item);
6423 BUG_ON(ret);
6424 kfree(root_item);
6425
6426 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6427 &root_key);
6428 BUG_ON(!reloc_root);
6429 reloc_root->last_trans = trans->transid;
6430 reloc_root->commit_root = NULL;
6431 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6432
6433 root->reloc_root = reloc_root;
6434 return 0;
6435 }
6436
6437 /*
6438 * Core function of space balance.
6439 *
6440 * The idea is to use reloc trees to relocate tree blocks in reference
6441 * counted roots. There is one reloc tree for each subvol, and all
6442 * reloc trees share the same root key objectid. Reloc trees are
6443 * snapshots of the latest committed roots of subvols (root->commit_root).
6444 *
6445 * To relocate a tree block referenced by a subvol, there are two steps.
6446 * COW the block through the subvol's reloc tree, then update the block
6447 * pointer in the subvol to point to the new block. Since all reloc
6448 * trees share the same root key objectid, special handling for tree
6449 * blocks owned by them is easy. Once a tree block has been COWed in one
6450 * reloc tree, we can use the resulting new block directly when the same
6451 * block needs to be COWed again through another reloc tree. In this way,
6452 * relocated tree blocks are shared between reloc trees, so they are
6453 * also shared between subvols.
6454 */
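/*
 * A condensed sketch of that two-step flow (illustrative only; locking,
 * error handling and the shared-level bookkeeping are omitted, see
 * relocate_one_path() below for the real sequence):
 *
 * init_reloc_tree(trans, root);
 * reloc_root = root->reloc_root;
 * btrfs_search_slot(trans, reloc_root, first_key, path, 0, 1);
 * (step 1: COW the path through the reloc tree)
 * btrfs_merge_path(trans, root, keys, nodes, lowest_level);
 * (step 2: splice the new blocks into the subvol's tree)
 */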
6455 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
6456 struct btrfs_root *root,
6457 struct btrfs_path *path,
6458 struct btrfs_key *first_key,
6459 struct btrfs_ref_path *ref_path,
6460 struct btrfs_block_group_cache *group,
6461 struct inode *reloc_inode)
6462 {
6463 struct btrfs_root *reloc_root;
6464 struct extent_buffer *eb = NULL;
6465 struct btrfs_key *keys;
6466 u64 *nodes;
6467 int level;
6468 int shared_level;
6469 int lowest_level = 0;
6470 int ret;
6471
6472 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6473 lowest_level = ref_path->owner_objectid;
6474
6475 if (!root->ref_cows) {
6476 path->lowest_level = lowest_level;
6477 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6478 BUG_ON(ret < 0);
6479 path->lowest_level = 0;
6480 btrfs_release_path(root, path);
6481 return 0;
6482 }
6483
6484 mutex_lock(&root->fs_info->tree_reloc_mutex);
6485 ret = init_reloc_tree(trans, root);
6486 BUG_ON(ret);
6487 reloc_root = root->reloc_root;
6488
6489 shared_level = ref_path->shared_level;
6490 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
6491
6492 keys = ref_path->node_keys;
6493 nodes = ref_path->new_nodes;
6494 memset(&keys[shared_level + 1], 0,
6495 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6496 memset(&nodes[shared_level + 1], 0,
6497 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
6498
6499 if (nodes[lowest_level] == 0) {
6500 path->lowest_level = lowest_level;
6501 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6502 0, 1);
6503 BUG_ON(ret);
6504 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6505 eb = path->nodes[level];
6506 if (!eb || eb == reloc_root->node)
6507 break;
6508 nodes[level] = eb->start;
6509 if (level == 0)
6510 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6511 else
6512 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6513 }
6514 if (nodes[0] &&
6515 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6516 eb = path->nodes[0];
6517 ret = replace_extents_in_leaf(trans, reloc_root, eb,
6518 group, reloc_inode);
6519 BUG_ON(ret);
6520 }
6521 btrfs_release_path(reloc_root, path);
6522 } else {
6523 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
6524 lowest_level);
6525 BUG_ON(ret);
6526 }
6527
6528 /*
6529 * replace tree blocks in the fs tree with tree blocks in
6530 * the reloc tree.
6531 */
6532 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6533 BUG_ON(ret < 0);
6534
6535 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6536 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6537 0, 0);
6538 BUG_ON(ret);
6539 extent_buffer_get(path->nodes[0]);
6540 eb = path->nodes[0];
6541 btrfs_release_path(reloc_root, path);
6542 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6543 BUG_ON(ret);
6544 free_extent_buffer(eb);
6545 }
6546
6547 mutex_unlock(&root->fs_info->tree_reloc_mutex);
6548 path->lowest_level = 0;
6549 return 0;
6550 }
6551
6552 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
6553 struct btrfs_root *root,
6554 struct btrfs_path *path,
6555 struct btrfs_key *first_key,
6556 struct btrfs_ref_path *ref_path)
6557 {
6558 int ret;
6559
6560 ret = relocate_one_path(trans, root, path, first_key,
6561 ref_path, NULL, NULL);
6562 BUG_ON(ret);
6563
6564 return 0;
6565 }
6566
6567 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
6568 struct btrfs_root *extent_root,
6569 struct btrfs_path *path,
6570 struct btrfs_key *extent_key)
6571 {
6572 int ret;
6573
6574 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
6575 if (ret)
6576 goto out;
6577 ret = btrfs_del_item(trans, extent_root, path);
6578 out:
6579 btrfs_release_path(extent_root, path);
6580 return ret;
6581 }
6582
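/*
 * look up the root a reference path starts from. COW-only roots are
 * keyed with offset 0; for reference counted subvols we pass (u64)-1
 * so the lookup resolves to the most recent root item.
 */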
6583 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
6584 struct btrfs_ref_path *ref_path)
6585 {
6586 struct btrfs_key root_key;
6587
6588 root_key.objectid = ref_path->root_objectid;
6589 root_key.type = BTRFS_ROOT_ITEM_KEY;
6590 if (is_cowonly_root(ref_path->root_objectid))
6591 root_key.offset = 0;
6592 else
6593 root_key.offset = (u64)-1;
6594
6595 return btrfs_read_fs_root_no_name(fs_info, &root_key);
6596 }
6597
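/*
 * relocate all references to the given extent. On pass 0, data extents
 * are simply copied into the relocation inode; on pass 1, data references
 * are updated in place via relocate_one_path() where possible, falling
 * back to get_new_locations() + replace_one_extent(). Tree blocks always
 * go through relocate_tree_block().
 */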
6598 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
6599 struct btrfs_path *path,
6600 struct btrfs_key *extent_key,
6601 struct btrfs_block_group_cache *group,
6602 struct inode *reloc_inode, int pass)
6603 {
6604 struct btrfs_trans_handle *trans;
6605 struct btrfs_root *found_root;
6606 struct btrfs_ref_path *ref_path = NULL;
6607 struct disk_extent *new_extents = NULL;
6608 int nr_extents = 0;
6609 int loops;
6610 int ret;
6611 int level;
6612 struct btrfs_key first_key;
6613 u64 prev_block = 0;
6614
6616 trans = btrfs_start_transaction(extent_root, 1);
6617 BUG_ON(!trans);
6618
6619 if (extent_key->objectid == 0) {
6620 ret = del_extent_zero(trans, extent_root, path, extent_key);
6621 goto out;
6622 }
6623
6624 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
6625 if (!ref_path) {
6626 ret = -ENOMEM;
6627 goto out;
6628 }
6629
6630 for (loops = 0; ; loops++) {
6631 if (loops == 0) {
6632 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
6633 extent_key->objectid);
6634 } else {
6635 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
6636 }
6637 if (ret < 0)
6638 goto out;
6639 if (ret > 0)
6640 break;
6641
6642 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6643 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
6644 continue;
6645
6646 found_root = read_ref_root(extent_root->fs_info, ref_path);
6647 BUG_ON(!found_root);
6648 /*
6649 * for reference counted trees, only process reference paths
6650 * rooted at the latest committed root.
6651 */
6652 if (found_root->ref_cows &&
6653 ref_path->root_generation != found_root->root_key.offset)
6654 continue;
6655
6656 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6657 if (pass == 0) {
6658 /*
6659 * copy data extents to new locations
6660 */
6661 u64 group_start = group->key.objectid;
6662 ret = relocate_data_extent(reloc_inode,
6663 extent_key,
6664 group_start);
6665 if (ret < 0)
6666 goto out;
6667 break;
6668 }
6669 level = 0;
6670 } else {
6671 level = ref_path->owner_objectid;
6672 }
6673
6674 if (prev_block != ref_path->nodes[level]) {
6675 struct extent_buffer *eb;
6676 u64 block_start = ref_path->nodes[level];
6677 u64 block_size = btrfs_level_size(found_root, level);
6678
6679 eb = read_tree_block(found_root, block_start,
6680 block_size, 0);
6681 btrfs_tree_lock(eb);
6682 BUG_ON(level != btrfs_header_level(eb));
6683
6684 if (level == 0)
6685 btrfs_item_key_to_cpu(eb, &first_key, 0);
6686 else
6687 btrfs_node_key_to_cpu(eb, &first_key, 0);
6688
6689 btrfs_tree_unlock(eb);
6690 free_extent_buffer(eb);
6691 prev_block = block_start;
6692 }
6693
6694 mutex_lock(&extent_root->fs_info->trans_mutex);
6695 btrfs_record_root_in_trans(found_root);
6696 mutex_unlock(&extent_root->fs_info->trans_mutex);
6697 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6698 /*
6699 * try to update data extent references while
6700 * keeping metadata shared between snapshots.
6701 */
6702 if (pass == 1) {
6703 ret = relocate_one_path(trans, found_root,
6704 path, &first_key, ref_path,
6705 group, reloc_inode);
6706 if (ret < 0)
6707 goto out;
6708 continue;
6709 }
6710 /*
6711 * use fallback method to process the remaining
6712 * references.
6713 */
6714 if (!new_extents) {
6715 u64 group_start = group->key.objectid;
6716 new_extents = kmalloc(sizeof(*new_extents),
6717 GFP_NOFS);
6718 nr_extents = 1;
6719 ret = get_new_locations(reloc_inode,
6720 extent_key,
6721 group_start, 1,
6722 &new_extents,
6723 &nr_extents);
6724 if (ret)
6725 goto out;
6726 }
6727 ret = replace_one_extent(trans, found_root,
6728 path, extent_key,
6729 &first_key, ref_path,
6730 new_extents, nr_extents);
6731 } else {
6732 ret = relocate_tree_block(trans, found_root, path,
6733 &first_key, ref_path);
6734 }
6735 if (ret < 0)
6736 goto out;
6737 }
6738 ret = 0;
6739 out:
6740 btrfs_end_transaction(trans, extent_root);
6741 kfree(new_extents);
6742 kfree(ref_path);
6743 return ret;
6744 }
6745 #endif
6746
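/*
 * pick the target block group profile for relocation based on how many
 * writable devices remain. Derived from the logic below: with a single
 * rw device, RAID1/RAID10 turns into DUP and RAID0 into single-device
 * chunks; with several devices, DUP turns into RAID1 and single-device
 * chunks into RAID0. Flags that already carry a raid profile are
 * returned unchanged.
 */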
6747 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6748 {
6749 u64 num_devices;
6750 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6751 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6752
6753 num_devices = root->fs_info->fs_devices->rw_devices;
6754 if (num_devices == 1) {
6755 stripped |= BTRFS_BLOCK_GROUP_DUP;
6756 stripped = flags & ~stripped;
6757
6758 /* turn raid0 into single device chunks */
6759 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6760 return stripped;
6761
6762 /* turn mirroring into duplication */
6763 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6764 BTRFS_BLOCK_GROUP_RAID10))
6765 return stripped | BTRFS_BLOCK_GROUP_DUP;
6766 return flags;
6767 } else {
6768 /* they already had raid on here, just return */
6769 if (flags & stripped)
6770 return flags;
6771
6772 stripped |= BTRFS_BLOCK_GROUP_DUP;
6773 stripped = flags & ~stripped;
6774
6775 /* switch duplicated blocks with raid1 */
6776 if (flags & BTRFS_BLOCK_GROUP_DUP)
6777 return stripped | BTRFS_BLOCK_GROUP_RAID1;
6778
6779 /* turn single device chunks into raid0 */
6780 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6781 }
6782 return flags;
6783 }
6784
6785 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
6786 struct btrfs_block_group_cache *shrink_block_group,
6787 int force)
6788 {
6789 struct btrfs_trans_handle *trans;
6790 u64 new_alloc_flags;
6791 u64 calc;
6792
6793 spin_lock(&shrink_block_group->lock);
6794 if (btrfs_block_group_used(&shrink_block_group->item) +
6795 shrink_block_group->reserved > 0) {
6796 spin_unlock(&shrink_block_group->lock);
6797
6798 trans = btrfs_start_transaction(root, 1);
6799 spin_lock(&shrink_block_group->lock);
6800
6801 new_alloc_flags = update_block_group_flags(root,
6802 shrink_block_group->flags);
6803 if (new_alloc_flags != shrink_block_group->flags) {
6804 calc =
6805 btrfs_block_group_used(&shrink_block_group->item);
6806 } else {
6807 calc = shrink_block_group->key.offset;
6808 }
6809 spin_unlock(&shrink_block_group->lock);
6810
6811 do_chunk_alloc(trans, root->fs_info->extent_root,
6812 calc + 2 * 1024 * 1024, new_alloc_flags, force);
6813
6814 btrfs_end_transaction(trans, root);
6815 } else
6816 spin_unlock(&shrink_block_group->lock);
6817 return 0;
6818 }
6819
6821 int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
6822 struct btrfs_block_group_cache *group)
6824 {
6825 __alloc_chunk_for_shrink(root, group, 1);
6826 set_block_group_readonly(group);
6827 return 0;
6828 }
6829
6830 #if 0
6831 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
6832 struct btrfs_root *root,
6833 u64 objectid, u64 size)
6834 {
6835 struct btrfs_path *path;
6836 struct btrfs_inode_item *item;
6837 struct extent_buffer *leaf;
6838 int ret;
6839
6840 path = btrfs_alloc_path();
6841 if (!path)
6842 return -ENOMEM;
6843
6844 path->leave_spinning = 1;
6845 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
6846 if (ret)
6847 goto out;
6848
6849 leaf = path->nodes[0];
6850 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
6851 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
6852 btrfs_set_inode_generation(leaf, item, 1);
6853 btrfs_set_inode_size(leaf, item, size);
6854 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
6855 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
6856 btrfs_mark_buffer_dirty(leaf);
6857 btrfs_release_path(root, path);
6858 out:
6859 btrfs_free_path(path);
6860 return ret;
6861 }
6862
6863 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
6864 struct btrfs_block_group_cache *group)
6865 {
6866 struct inode *inode = NULL;
6867 struct btrfs_trans_handle *trans;
6868 struct btrfs_root *root;
6869 struct btrfs_key root_key;
6870 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
6871 int err = 0;
6872
6873 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6874 root_key.type = BTRFS_ROOT_ITEM_KEY;
6875 root_key.offset = (u64)-1;
6876 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
6877 if (IS_ERR(root))
6878 return ERR_CAST(root);
6879
6880 trans = btrfs_start_transaction(root, 1);
6881 BUG_ON(!trans);
6882
6883 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
6884 if (err)
6885 goto out;
6886
6887 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
6888 BUG_ON(err);
6889
6890 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
6891 group->key.offset, 0, group->key.offset,
6892 0, 0, 0);
6893 BUG_ON(err);
6894
6895 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
6896 if (inode->i_state & I_NEW) {
6897 BTRFS_I(inode)->root = root;
6898 BTRFS_I(inode)->location.objectid = objectid;
6899 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
6900 BTRFS_I(inode)->location.offset = 0;
6901 btrfs_read_locked_inode(inode);
6902 unlock_new_inode(inode);
6903 BUG_ON(is_bad_inode(inode));
6904 } else {
6905 BUG_ON(1);
6906 }
6907 BTRFS_I(inode)->index_cnt = group->key.objectid;
6908
6909 err = btrfs_orphan_add(trans, inode);
6910 out:
6911 btrfs_end_transaction(trans, root);
6912 if (err) {
6913 if (inode)
6914 iput(inode);
6915 inode = ERR_PTR(err);
6916 }
6917 return inode;
6918 }
6919
6920 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
6921 {
6923 struct btrfs_ordered_sum *sums;
6924 struct btrfs_sector_sum *sector_sum;
6925 struct btrfs_ordered_extent *ordered;
6926 struct btrfs_root *root = BTRFS_I(inode)->root;
6927 struct list_head list;
6928 size_t offset;
6929 int ret;
6930 u64 disk_bytenr;
6931
6932 INIT_LIST_HEAD(&list);
6933
6934 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
6935 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
6936
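/*
 * index_cnt was set to the start of the block group being relocated
 * (see create_reloc_inode() above), so file_pos + index_cnt recovers
 * the extent's old logical disk address.
 */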
6937 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
6938 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
6939 disk_bytenr + len - 1, &list);
BUG_ON(ret);
6940
6941 while (!list_empty(&list)) {
6942 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
6943 list_del_init(&sums->list);
6944
6945 sector_sum = sums->sums;
6946 sums->bytenr = ordered->start;
6947
6948 offset = 0;
6949 while (offset < sums->len) {
6950 sector_sum->bytenr += ordered->start - disk_bytenr;
6951 sector_sum++;
6952 offset += root->sectorsize;
6953 }
6954
6955 btrfs_add_ordered_sum(inode, ordered, sums);
6956 }
6957 btrfs_put_ordered_extent(ordered);
6958 return 0;
6959 }
6960
6961 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
6962 {
6963 struct btrfs_trans_handle *trans;
6964 struct btrfs_path *path;
6965 struct btrfs_fs_info *info = root->fs_info;
6966 struct extent_buffer *leaf;
6967 struct inode *reloc_inode;
6968 struct btrfs_block_group_cache *block_group;
6969 struct btrfs_key key;
6970 u64 skipped;
6971 u64 cur_byte;
6972 u64 total_found;
6973 u32 nritems;
6974 int ret;
6975 int progress;
6976 int pass = 0;
6977
6978 root = root->fs_info->extent_root;
6979
6980 block_group = btrfs_lookup_block_group(info, group_start);
6981 BUG_ON(!block_group);
6982
6983 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
6984 (unsigned long long)block_group->key.objectid,
6985 (unsigned long long)block_group->flags);
6986
6987 path = btrfs_alloc_path();
6988 BUG_ON(!path);
6989
6990 reloc_inode = create_reloc_inode(info, block_group);
6991 BUG_ON(IS_ERR(reloc_inode));
6992
6993 __alloc_chunk_for_shrink(root, block_group, 1);
6994 set_block_group_readonly(block_group);
6995
6996 btrfs_start_delalloc_inodes(info->tree_root);
6997 btrfs_wait_ordered_extents(info->tree_root, 0);
6998 again:
6999 skipped = 0;
7000 total_found = 0;
7001 progress = 0;
7002 key.objectid = block_group->key.objectid;
7003 key.offset = 0;
7004 key.type = 0;
7005 cur_byte = key.objectid;
7006
7007 trans = btrfs_start_transaction(info->tree_root, 1);
7008 btrfs_commit_transaction(trans, info->tree_root);
7009
7010 mutex_lock(&root->fs_info->cleaner_mutex);
7011 btrfs_clean_old_snapshots(info->tree_root);
7012 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
7013 mutex_unlock(&root->fs_info->cleaner_mutex);
7014
7015 trans = btrfs_start_transaction(info->tree_root, 1);
7016 btrfs_commit_transaction(trans, info->tree_root);
7017
7018 while (1) {
7019 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7020 if (ret < 0)
7021 goto out;
7022 next:
7023 leaf = path->nodes[0];
7024 nritems = btrfs_header_nritems(leaf);
7025 if (path->slots[0] >= nritems) {
7026 ret = btrfs_next_leaf(root, path);
7027 if (ret < 0)
7028 goto out;
7029 if (ret == 1) {
7030 ret = 0;
7031 break;
7032 }
7033 leaf = path->nodes[0];
7034 nritems = btrfs_header_nritems(leaf);
7035 }
7036
7037 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7038
7039 if (key.objectid >= block_group->key.objectid +
7040 block_group->key.offset)
7041 break;
7042
7043 if (progress && need_resched()) {
7044 btrfs_release_path(root, path);
7045 cond_resched();
7046 progress = 0;
7047 continue;
7048 }
7049 progress = 1;
7050
7051 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
7052 key.objectid + key.offset <= cur_byte) {
7053 path->slots[0]++;
7054 goto next;
7055 }
7056
7057 total_found++;
7058 cur_byte = key.objectid + key.offset;
7059 btrfs_release_path(root, path);
7060
7061 __alloc_chunk_for_shrink(root, block_group, 0);
7062 ret = relocate_one_extent(root, path, &key, block_group,
7063 reloc_inode, pass);
7064 BUG_ON(ret < 0);
7065 if (ret > 0)
7066 skipped++;
7067
7068 key.objectid = cur_byte;
7069 key.type = 0;
7070 key.offset = 0;
7071 }
7072
7073 btrfs_release_path(root, path);
7074
7075 if (pass == 0) {
7076 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
7077 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
7078 }
7079
7080 if (total_found > 0) {
7081 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
7082 (unsigned long long)total_found, pass);
7083 pass++;
7084 if (total_found == skipped && pass > 2) {
7085 iput(reloc_inode);
7086 reloc_inode = create_reloc_inode(info, block_group);
7087 pass = 0;
7088 }
7089 goto again;
7090 }
7091
7092 /* delete reloc_inode */
7093 iput(reloc_inode);
7094
7095 /* unpin extents in this range */
7096 trans = btrfs_start_transaction(info->tree_root, 1);
7097 btrfs_commit_transaction(trans, info->tree_root);
7098
7099 spin_lock(&block_group->lock);
7100 WARN_ON(block_group->pinned > 0);
7101 WARN_ON(block_group->reserved > 0);
7102 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
7103 spin_unlock(&block_group->lock);
7104 btrfs_put_block_group(block_group);
7105 ret = 0;
7106 out:
7107 btrfs_free_path(path);
7108 return ret;
7109 }
7110 #endif
7111
7112 static int find_first_block_group(struct btrfs_root *root,
7113 struct btrfs_path *path, struct btrfs_key *key)
7114 {
7115 int ret = 0;
7116 struct btrfs_key found_key;
7117 struct extent_buffer *leaf;
7118 int slot;
7119
7120 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7121 if (ret < 0)
7122 goto out;
7123
7124 while (1) {
7125 slot = path->slots[0];
7126 leaf = path->nodes[0];
7127 if (slot >= btrfs_header_nritems(leaf)) {
7128 ret = btrfs_next_leaf(root, path);
7129 if (ret == 0)
7130 continue;
7131 if (ret < 0)
7132 goto out;
7133 break;
7134 }
7135 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7136
7137 if (found_key.objectid >= key->objectid &&
7138 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7139 ret = 0;
7140 goto out;
7141 }
7142 path->slots[0]++;
7143 }
7144 ret = -ENOENT;
7145 out:
7146 return ret;
7147 }
7148
7149 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7150 {
7151 struct btrfs_block_group_cache *block_group;
7152 struct btrfs_space_info *space_info;
7153 struct rb_node *n;
7154
7155 spin_lock(&info->block_group_cache_lock);
7156 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7157 block_group = rb_entry(n, struct btrfs_block_group_cache,
7158 cache_node);
7159 rb_erase(&block_group->cache_node,
7160 &info->block_group_cache_tree);
7161 spin_unlock(&info->block_group_cache_lock);
7162
7163 down_write(&block_group->space_info->groups_sem);
7164 list_del(&block_group->list);
7165 up_write(&block_group->space_info->groups_sem);
7166
7167 if (block_group->cached == BTRFS_CACHE_STARTED)
7168 wait_event(block_group->caching_q,
7169 block_group_cache_done(block_group));
7170
7171 btrfs_remove_free_space_cache(block_group);
7172
7173 WARN_ON(atomic_read(&block_group->count) != 1);
7174 kfree(block_group);
7175
7176 spin_lock(&info->block_group_cache_lock);
7177 }
7178 spin_unlock(&info->block_group_cache_lock);
7179
7180 /* now that all the block groups are freed, go through and
7181 * free all the space_info structs. This is only called during
7182 * the final stages of unmount, and so we know nobody is
7183 * using them. We call synchronize_rcu() once before we start,
7184 * just to be on the safe side.
7185 */
7186 synchronize_rcu();
7187
7188 while (!list_empty(&info->space_info)) {
7189 space_info = list_entry(info->space_info.next,
7190 struct btrfs_space_info,
7191 list);
7192
7193 list_del(&space_info->list);
7194 kfree(space_info);
7195 }
7196 return 0;
7197 }
7198
7199 int btrfs_read_block_groups(struct btrfs_root *root)
7200 {
7201 struct btrfs_path *path;
7202 int ret;
7203 struct btrfs_block_group_cache *cache;
7204 struct btrfs_fs_info *info = root->fs_info;
7205 struct btrfs_space_info *space_info;
7206 struct btrfs_key key;
7207 struct btrfs_key found_key;
7208 struct extent_buffer *leaf;
7209
7210 root = info->extent_root;
7211 key.objectid = 0;
7212 key.offset = 0;
7213 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7214 path = btrfs_alloc_path();
7215 if (!path)
7216 return -ENOMEM;
7217
7218 while (1) {
7219 ret = find_first_block_group(root, path, &key);
7220 if (ret > 0) {
7221 ret = 0;
7222 goto error;
7223 }
7224 if (ret != 0)
7225 goto error;
7226
7227 leaf = path->nodes[0];
7228 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7229 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7230 if (!cache) {
7231 ret = -ENOMEM;
7232 break;
7233 }
7234
7235 atomic_set(&cache->count, 1);
7236 spin_lock_init(&cache->lock);
7237 spin_lock_init(&cache->tree_lock);
7238 cache->fs_info = info;
7239 init_waitqueue_head(&cache->caching_q);
7240 INIT_LIST_HEAD(&cache->list);
7241 INIT_LIST_HEAD(&cache->cluster_list);
7242
7243 /*
7244 * we only want to have 32k of ram per block group for keeping
7245 * track of free space, and if we pass 1/2 of that we want to
7246 * start converting things over to using bitmaps
7247 */
7248 cache->extents_thresh = ((1024 * 32) / 2) /
7249 sizeof(struct btrfs_free_space);
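/*
 * e.g. if sizeof(struct btrfs_free_space) were 32 bytes (illustrative,
 * it varies by arch), this works out to 16384 / 32 = 512 cached extent
 * entries before we start converting to bitmaps.
 */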
7250
7251 read_extent_buffer(leaf, &cache->item,
7252 btrfs_item_ptr_offset(leaf, path->slots[0]),
7253 sizeof(cache->item));
7254 memcpy(&cache->key, &found_key, sizeof(found_key));
7255
7256 key.objectid = found_key.objectid + found_key.offset;
7257 btrfs_release_path(root, path);
7258 cache->flags = btrfs_block_group_flags(&cache->item);
7259 cache->sectorsize = root->sectorsize;
7260
7261 remove_sb_from_cache(root, cache);
7262
7263 /*
7264 * check for two cases, either we are full, and therefore
7265 * don't need to bother with the caching work since we won't
7266 * find any space, or we are empty, and we can just add all
7267 * the space in and be done with it. This saves us a lot of
7268 * time, particularly in the full case.
7269 */
7270 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7271 cache->cached = BTRFS_CACHE_FINISHED;
7272 } else if (btrfs_block_group_used(&cache->item) == 0) {
7273 cache->cached = BTRFS_CACHE_FINISHED;
7274 add_new_free_space(cache, root->fs_info,
7275 found_key.objectid,
7276 found_key.objectid +
7277 found_key.offset);
7278 }
7279
7280 ret = update_space_info(info, cache->flags, found_key.offset,
7281 btrfs_block_group_used(&cache->item),
7282 &space_info);
7283 BUG_ON(ret);
7284 cache->space_info = space_info;
7285 down_write(&space_info->groups_sem);
7286 list_add_tail(&cache->list, &space_info->block_groups);
7287 up_write(&space_info->groups_sem);
7288
7289 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7290 BUG_ON(ret);
7291
7292 set_avail_alloc_bits(root->fs_info, cache->flags);
7293 if (btrfs_chunk_readonly(root, cache->key.objectid))
7294 set_block_group_readonly(cache);
7295 }
7296 ret = 0;
7297 error:
7298 btrfs_free_path(path);
7299 return ret;
7300 }
7301
7302 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7303 struct btrfs_root *root, u64 bytes_used,
7304 u64 type, u64 chunk_objectid, u64 chunk_offset,
7305 u64 size)
7306 {
7307 int ret;
7308 struct btrfs_root *extent_root;
7309 struct btrfs_block_group_cache *cache;
7310
7311 extent_root = root->fs_info->extent_root;
7312
7313 root->fs_info->last_trans_log_full_commit = trans->transid;
7314
7315 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7316 if (!cache)
7317 return -ENOMEM;
7318
7319 cache->key.objectid = chunk_offset;
7320 cache->key.offset = size;
7321 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7322 cache->sectorsize = root->sectorsize;
7323
7324 /*
7325 * we only want to have 32k of ram per block group for keeping track
7326 * of free space, and if we pass 1/2 of that we want to start
7327 * converting things over to using bitmaps
7328 */
7329 cache->extents_thresh = ((1024 * 32) / 2) /
7330 sizeof(struct btrfs_free_space);
7331 atomic_set(&cache->count, 1);
7332 spin_lock_init(&cache->lock);
7333 spin_lock_init(&cache->tree_lock);
7334 init_waitqueue_head(&cache->caching_q);
7335 INIT_LIST_HEAD(&cache->list);
7336 INIT_LIST_HEAD(&cache->cluster_list);
7337
7338 btrfs_set_block_group_used(&cache->item, bytes_used);
7339 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7340 cache->flags = type;
7341 btrfs_set_block_group_flags(&cache->item, type);
7342
7343 cache->cached = BTRFS_CACHE_FINISHED;
7344 remove_sb_from_cache(root, cache);
7345
7346 add_new_free_space(cache, root->fs_info, chunk_offset,
7347 chunk_offset + size);
7348
7349 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7350 &cache->space_info);
7351 BUG_ON(ret);
7352 down_write(&cache->space_info->groups_sem);
7353 list_add_tail(&cache->list, &cache->space_info->block_groups);
7354 up_write(&cache->space_info->groups_sem);
7355
7356 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7357 BUG_ON(ret);
7358
7359 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7360 sizeof(cache->item));
7361 BUG_ON(ret);
7362
7363 set_avail_alloc_bits(extent_root->fs_info, type);
7364
7365 return 0;
7366 }
7367
7368 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7369 struct btrfs_root *root, u64 group_start)
7370 {
7371 struct btrfs_path *path;
7372 struct btrfs_block_group_cache *block_group;
7373 struct btrfs_free_cluster *cluster;
7374 struct btrfs_key key;
7375 int ret;
7376
7377 root = root->fs_info->extent_root;
7378
7379 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7380 BUG_ON(!block_group);
7381 BUG_ON(!block_group->ro);
7382
7383 memcpy(&key, &block_group->key, sizeof(key));
7384
7385 /* make sure this block group isn't part of an allocation cluster */
7386 cluster = &root->fs_info->data_alloc_cluster;
7387 spin_lock(&cluster->refill_lock);
7388 btrfs_return_cluster_to_free_space(block_group, cluster);
7389 spin_unlock(&cluster->refill_lock);
7390
7391 /*
7392 * make sure this block group isn't part of a metadata
7393 * allocation cluster
7394 */
7395 cluster = &root->fs_info->meta_alloc_cluster;
7396 spin_lock(&cluster->refill_lock);
7397 btrfs_return_cluster_to_free_space(block_group, cluster);
7398 spin_unlock(&cluster->refill_lock);
7399
7400 path = btrfs_alloc_path();
7401 BUG_ON(!path);
7402
7403 spin_lock(&root->fs_info->block_group_cache_lock);
7404 rb_erase(&block_group->cache_node,
7405 &root->fs_info->block_group_cache_tree);
7406 spin_unlock(&root->fs_info->block_group_cache_lock);
7407
7408 down_write(&block_group->space_info->groups_sem);
7409 /*
7410 * we must use list_del_init so people can check to see if they
7411 * are still on the list after taking the semaphore
7412 */
7413 list_del_init(&block_group->list);
7414 up_write(&block_group->space_info->groups_sem);
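/*
 * after list_del_init() the entry points at itself, so a groups_sem
 * holder can safely test for removal with:
 *
 * if (list_empty(&block_group->list))
 * (the group is going away)
 *
 * a plain list_del() would leave the pointers poisoned instead.
 */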
7415
7416 if (block_group->cached == BTRFS_CACHE_STARTED)
7417 wait_event(block_group->caching_q,
7418 block_group_cache_done(block_group));
7419
7420 btrfs_remove_free_space_cache(block_group);
7421
7422 spin_lock(&block_group->space_info->lock);
7423 block_group->space_info->total_bytes -= block_group->key.offset;
7424 block_group->space_info->bytes_readonly -= block_group->key.offset;
7425 spin_unlock(&block_group->space_info->lock);
7426
7427 btrfs_clear_space_info_full(root->fs_info);
7428
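/* once for the lookup reference, once for the rbtree's reference */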
7429 btrfs_put_block_group(block_group);
7430 btrfs_put_block_group(block_group);
7431
7432 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7433 if (ret > 0)
7434 ret = -EIO;
7435 if (ret < 0)
7436 goto out;
7437
7438 ret = btrfs_del_item(trans, root, path);
7439 out:
7440 btrfs_free_path(path);
7441 return ret;
7442 }