Btrfs: don't call btrfs_throttle in file write
fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02110-1301, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

/*
 * control flags for do_chunk_alloc's force field
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 *
 * CHUNK_ALLOC_LIMITED means to only try to allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_FORCE = 1,
	CHUNK_ALLOC_LIMITED = 2,
};

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

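/*
 * A rough sketch of a reservation's lifetime (not a guaranteed call
 * sequence): the allocator reserves space in a block group with
 * btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC), and the
 * same byte count is released later with RESERVE_FREE once the extent is
 * committed or abandoned.  RESERVE_ALLOC_NO_ACCOUNT is the variant used
 * when the caller has already done its own ENOSPC accounting.
 */
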
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

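/*
 * Note the btrfs_get_block_group() above: any block group returned by this
 * search comes back with an elevated reference count, so every caller
 * (including users of the two lookup wrappers later in this file) must
 * eventually drop that reference with btrfs_put_block_group().
 */
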
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group; since we could have freed
 * extents, we need to check the pinned_extents for any extents that can't
 * be used yet, because their free space will be released as soon as the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

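/*
 * A worked example for the helper above (hypothetical numbers): caching
 * the range [0, 100M) while [40M, 44M) is still pinned adds two
 * free-space entries, [0, 40M) and [44M, 100M), and returns roughly 96M
 * in total_added.  The pinned gap only becomes usable free space after
 * the transaction commit unpins it.
 */
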
static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    btrfs_next_leaf(extent_root, path)) {
				caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->extent_commit_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

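			/*
			 * Wake up anyone waiting on this block group each
			 * time another 2MB of free space has been found,
			 * so allocators don't have to wait for the whole
			 * group to finish caching.
			 */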
			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	caching_ctl->work.func = caching_thread;

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but this could happen I think in the
	 * case where one thread starts to load the space cache info, and then
	 * some other thread starts a transaction commit which tries to do an
	 * allocation while the other thread is still loading the space cache
	 * info.  The previous loop should have kept us from choosing this block
	 * group, but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load here,
	 * so we can wait for it to finish, otherwise we could end up allocating
	 * from a block group whose cache gets evicted for one reason or
	 * another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.  Also if we are currently trying to
	 * allocate blocks for the tree root we can't do the fast caching since
	 * we likely hold important locks.
	 */
	if (trans && (!trans->transaction->in_commit) &&
	    (root && root != root->fs_info->tree_root) &&
	    btrfs_test_opt(root, SPACE_CACHE)) {
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
			}
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, set cached to the
		 * appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->extent_commit_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_worker(&fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}

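/*
 * The div_factor helpers scale a byte count by factor/10 (coarse) or
 * factor/100 (fine), using do_div() so the 64-bit division is safe on
 * 32-bit hosts.  For example, div_factor(1024 * 1024, 9) is 90% of a
 * megabyte, and div_factor_fine(1000, 75) == 750.
 */
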
u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be if all of
 * the delayed refs were already run.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

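/*
 * Worked example for the helper above: if the extent item on disk records
 * refs == 3 while the delayed ref head carries ref_mod == -2 (two queued
 * drops), the caller sees *refs == 1 -- the count the extent will have
 * once the delayed refs are run.
 */
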
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back ref is optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back ref is for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back ref is generic, and
 * can be used in all cases the implicit back ref is used.  The major
 * shortcoming of the full back ref is its overhead.  Every time a tree
 * block gets COWed, we have to update back ref entries for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required.  This information is stored in
 * the tree block info structure.
 */

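/*
 * A concrete sketch of how these keys look (bytenrs are hypothetical): a
 * data extent at bytenr 12582912 referenced from file inode 257 in
 * subvolume 5 carries the implicit key
 * (12582912, BTRFS_EXTENT_DATA_REF_KEY, hash(5, 257, file_offset)),
 * while the same extent referenced through a shared leaf at bytenr
 * 30408704 would use the full variant
 * (12582912, BTRFS_SHARED_DATA_REF_KEY, 30408704).
 */
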
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

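/*
 * Note the hash above combines two crc32c values with a 31-bit (not
 * 32-bit) shift.  Whatever its origin, it cannot be "fixed" now: these
 * hashes are stored as key offsets on disk, so changing the function
 * would break every existing filesystem.
 */
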
static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
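
		/*
		 * -EEXIST from the insert above means the hashed key offset
		 * collided with an existing ref item.  Since different
		 * (root, inode, offset) triples can hash to the same value,
		 * probe forward one offset at a time until we either find
		 * our ref (and bump its count below) or find a free slot.
		 */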
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

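/*
 * In table form, the mapping extent_ref_type() implements:
 *
 *                      parent == 0                 parent > 0
 *   tree block    BTRFS_TREE_BLOCK_REF_KEY   BTRFS_SHARED_BLOCK_REF_KEY
 *   data extent   BTRFS_EXTENT_DATA_REF_KEY  BTRFS_SHARED_DATA_REF_KEY
 *
 * where "tree block" means owner < BTRFS_FIRST_FREE_OBJECTID, i.e. the
 * owner is a tree id rather than an inode number.
 */
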
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for an inline back ref.  if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static int btrfs_issue_discard(struct block_device *bdev,
			       u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
}

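/*
 * The >> 9 above converts byte offsets and lengths into 512-byte sectors,
 * which is what blkdev_issue_discard() expects: e.g. a 1MB (1048576 byte)
 * range becomes 2048 sectors regardless of the device's logical block size.
 */
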
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes, u64 *actual_bytes)
{
	int ret;
	u64 discarded_bytes = 0;
	struct btrfs_bio *bbio = NULL;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
			      bytenr, &num_bytes, &bbio, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = bbio->stripes;
		int i;

		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
			if (!stripe->dev->can_discard)
				continue;

			ret = btrfs_issue_discard(stripe->dev->bdev,
						  stripe->physical,
						  stripe->length);
			if (!ret)
				discarded_bytes += stripe->length;
			else if (ret != -EOPNOTSUPP)
				break;

			/*
			 * Just in case we get back EOPNOTSUPP for some reason,
			 * just ignore the return value so we don't screw up
			 * people calling discard_extent.
			 */
			ret = 0;
		}
		kfree(bbio);
	}

	if (actual_bytes)
		*actual_bytes = discarded_bytes;

	return ret;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset, int for_cow)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	} else {
		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
					num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL, for_cow);
	}
	return ret;
}

1896 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1897 struct btrfs_root *root,
1898 u64 bytenr, u64 num_bytes,
1899 u64 parent, u64 root_objectid,
1900 u64 owner, u64 offset, int refs_to_add,
1901 struct btrfs_delayed_extent_op *extent_op)
1902 {
1903 struct btrfs_path *path;
1904 struct extent_buffer *leaf;
1905 struct btrfs_extent_item *item;
1906 u64 refs;
1907 int ret;
1908 int err = 0;
1909
1910 path = btrfs_alloc_path();
1911 if (!path)
1912 return -ENOMEM;
1913
1914 path->reada = 1;
1915 path->leave_spinning = 1;
1916 /* this will set up the path even if it fails to insert the back ref */
1917 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1918 path, bytenr, num_bytes, parent,
1919 root_objectid, owner, offset,
1920 refs_to_add, extent_op);
1921 if (ret == 0)
1922 goto out;
1923
1924 if (ret != -EAGAIN) {
1925 err = ret;
1926 goto out;
1927 }
1928
1929 leaf = path->nodes[0];
1930 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1931 refs = btrfs_extent_refs(leaf, item);
1932 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1933 if (extent_op)
1934 __run_delayed_extent_op(extent_op, leaf, item);
1935
1936 btrfs_mark_buffer_dirty(leaf);
1937 btrfs_release_path(path);
1938
1939 path->reada = 1;
1940 path->leave_spinning = 1;
1941
1942 /* now insert the actual backref */
1943 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1944 path, bytenr, parent, root_objectid,
1945 owner, offset, refs_to_add);
1946 BUG_ON(ret);
1947 out:
1948 btrfs_free_path(path);
1949 return err;
1950 }
1951
1952 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1953 struct btrfs_root *root,
1954 struct btrfs_delayed_ref_node *node,
1955 struct btrfs_delayed_extent_op *extent_op,
1956 int insert_reserved)
1957 {
1958 int ret = 0;
1959 struct btrfs_delayed_data_ref *ref;
1960 struct btrfs_key ins;
1961 u64 parent = 0;
1962 u64 ref_root = 0;
1963 u64 flags = 0;
1964
1965 ins.objectid = node->bytenr;
1966 ins.offset = node->num_bytes;
1967 ins.type = BTRFS_EXTENT_ITEM_KEY;
1968
1969 ref = btrfs_delayed_node_to_data_ref(node);
1970 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1971 parent = ref->parent;
1972 else
1973 ref_root = ref->root;
1974
1975 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1976 if (extent_op) {
1977 BUG_ON(extent_op->update_key);
1978 flags |= extent_op->flags_to_set;
1979 }
1980 ret = alloc_reserved_file_extent(trans, root,
1981 parent, ref_root, flags,
1982 ref->objectid, ref->offset,
1983 &ins, node->ref_mod);
1984 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1985 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1986 node->num_bytes, parent,
1987 ref_root, ref->objectid,
1988 ref->offset, node->ref_mod,
1989 extent_op);
1990 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1991 ret = __btrfs_free_extent(trans, root, node->bytenr,
1992 node->num_bytes, parent,
1993 ref_root, ref->objectid,
1994 ref->offset, node->ref_mod,
1995 extent_op);
1996 } else {
1997 BUG();
1998 }
1999 return ret;
2000 }
2001
2002 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2003 struct extent_buffer *leaf,
2004 struct btrfs_extent_item *ei)
2005 {
2006 u64 flags = btrfs_extent_flags(leaf, ei);
2007 if (extent_op->update_flags) {
2008 flags |= extent_op->flags_to_set;
2009 btrfs_set_extent_flags(leaf, ei, flags);
2010 }
2011
2012 if (extent_op->update_key) {
2013 struct btrfs_tree_block_info *bi;
2014 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2015 bi = (struct btrfs_tree_block_info *)(ei + 1);
2016 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2017 }
2018 }
2019
2020 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2021 struct btrfs_root *root,
2022 struct btrfs_delayed_ref_node *node,
2023 struct btrfs_delayed_extent_op *extent_op)
2024 {
2025 struct btrfs_key key;
2026 struct btrfs_path *path;
2027 struct btrfs_extent_item *ei;
2028 struct extent_buffer *leaf;
2029 u32 item_size;
2030 int ret;
2031 int err = 0;
2032
2033 path = btrfs_alloc_path();
2034 if (!path)
2035 return -ENOMEM;
2036
2037 key.objectid = node->bytenr;
2038 key.type = BTRFS_EXTENT_ITEM_KEY;
2039 key.offset = node->num_bytes;
2040
2041 path->reada = 1;
2042 path->leave_spinning = 1;
2043 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2044 path, 0, 1);
2045 if (ret < 0) {
2046 err = ret;
2047 goto out;
2048 }
2049 if (ret > 0) {
2050 err = -EIO;
2051 goto out;
2052 }
2053
2054 leaf = path->nodes[0];
2055 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2056 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2057 if (item_size < sizeof(*ei)) {
2058 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2059 path, (u64)-1, 0);
2060 if (ret < 0) {
2061 err = ret;
2062 goto out;
2063 }
2064 leaf = path->nodes[0];
2065 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2066 }
2067 #endif
2068 BUG_ON(item_size < sizeof(*ei));
2069 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2070 __run_delayed_extent_op(extent_op, leaf, ei);
2071
2072 btrfs_mark_buffer_dirty(leaf);
2073 out:
2074 btrfs_free_path(path);
2075 return err;
2076 }
2077
2078 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2079 struct btrfs_root *root,
2080 struct btrfs_delayed_ref_node *node,
2081 struct btrfs_delayed_extent_op *extent_op,
2082 int insert_reserved)
2083 {
2084 int ret = 0;
2085 struct btrfs_delayed_tree_ref *ref;
2086 struct btrfs_key ins;
2087 u64 parent = 0;
2088 u64 ref_root = 0;
2089
2090 ins.objectid = node->bytenr;
2091 ins.offset = node->num_bytes;
2092 ins.type = BTRFS_EXTENT_ITEM_KEY;
2093
2094 ref = btrfs_delayed_node_to_tree_ref(node);
2095 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2096 parent = ref->parent;
2097 else
2098 ref_root = ref->root;
2099
2100 BUG_ON(node->ref_mod != 1);
2101 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2102 BUG_ON(!extent_op || !extent_op->update_flags ||
2103 !extent_op->update_key);
2104 ret = alloc_reserved_tree_block(trans, root,
2105 parent, ref_root,
2106 extent_op->flags_to_set,
2107 &extent_op->key,
2108 ref->level, &ins);
2109 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2110 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2111 node->num_bytes, parent, ref_root,
2112 ref->level, 0, 1, extent_op);
2113 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2114 ret = __btrfs_free_extent(trans, root, node->bytenr,
2115 node->num_bytes, parent, ref_root,
2116 ref->level, 0, 1, extent_op);
2117 } else {
2118 BUG();
2119 }
2120 return ret;
2121 }
2122
2123 /* helper function to actually process a single delayed ref entry */
2124 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2125 struct btrfs_root *root,
2126 struct btrfs_delayed_ref_node *node,
2127 struct btrfs_delayed_extent_op *extent_op,
2128 int insert_reserved)
2129 {
2130 int ret;
2131 if (btrfs_delayed_ref_is_head(node)) {
2132 struct btrfs_delayed_ref_head *head;
2133 /*
2134 * we've hit the end of the chain and we were supposed
2135 * to insert this extent into the tree. But it got
2136 * deleted before we ever needed to insert it, so all
2137 * we have to do is clean up the accounting
2138 */
2139 BUG_ON(extent_op);
2140 head = btrfs_delayed_node_to_head(node);
2141 if (insert_reserved) {
2142 btrfs_pin_extent(root, node->bytenr,
2143 node->num_bytes, 1);
2144 if (head->is_data) {
2145 ret = btrfs_del_csums(trans, root,
2146 node->bytenr,
2147 node->num_bytes);
2148 BUG_ON(ret);
2149 }
2150 }
2151 mutex_unlock(&head->mutex);
2152 return 0;
2153 }
2154
2155 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2156 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2157 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2158 insert_reserved);
2159 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2160 node->type == BTRFS_SHARED_DATA_REF_KEY)
2161 ret = run_delayed_data_ref(trans, root, node, extent_op,
2162 insert_reserved);
2163 else
2164 BUG();
2165 return ret;
2166 }
2167
2168 static noinline struct btrfs_delayed_ref_node *
2169 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2170 {
2171 struct rb_node *node;
2172 struct btrfs_delayed_ref_node *ref;
2173 int action = BTRFS_ADD_DELAYED_REF;
2174 again:
2175 /*
2176 * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
2177 * this prevents the ref count from going down to zero while
2178 * there are still pending delayed refs.
2179 */
2180 node = rb_prev(&head->node.rb_node);
2181 while (1) {
2182 if (!node)
2183 break;
2184 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2185 rb_node);
2186 if (ref->bytenr != head->node.bytenr)
2187 break;
2188 if (ref->action == action)
2189 return ref;
2190 node = rb_prev(node);
2191 }
2192 if (action == BTRFS_ADD_DELAYED_REF) {
2193 action = BTRFS_DROP_DELAYED_REF;
2194 goto again;
2195 }
2196 return NULL;
2197 }
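
/*
 * Worked example (editor's note): if the queued updates for one extent are
 * DROP, ADD, DROP, the loop above hands back the ADD first.  Running the
 * ADD before either DROP keeps the ref count from dipping to zero (and the
 * extent from being freed) while updates are still pending.
 */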
2198
2199 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2200 struct btrfs_root *root,
2201 struct list_head *cluster)
2202 {
2203 struct btrfs_delayed_ref_root *delayed_refs;
2204 struct btrfs_delayed_ref_node *ref;
2205 struct btrfs_delayed_ref_head *locked_ref = NULL;
2206 struct btrfs_delayed_extent_op *extent_op;
2207 int ret;
2208 int count = 0;
2209 int must_insert_reserved = 0;
2210
2211 delayed_refs = &trans->transaction->delayed_refs;
2212 while (1) {
2213 if (!locked_ref) {
2214 /* pick a new head ref from the cluster list */
2215 if (list_empty(cluster))
2216 break;
2217
2218 locked_ref = list_entry(cluster->next,
2219 struct btrfs_delayed_ref_head, cluster);
2220
2221 /* grab the lock that says we are going to process
2222 * all the refs for this head */
2223 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2224
2225 /*
2226 * we may have dropped the spin lock to get the head
2227 * mutex lock, and that might have given someone else
2228 * time to free the head. If that's true, it has been
2229 * removed from our list and we can move on.
2230 */
2231 if (ret == -EAGAIN) {
2232 locked_ref = NULL;
2233 count++;
2234 continue;
2235 }
2236 }
2237
2238 /*
2239 * locked_ref is the head node, so we have to go one
2240 * node back for any delayed ref updates
2241 */
2242 ref = select_delayed_ref(locked_ref);
2243
2244 if (ref && ref->seq &&
2245 btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
2246 /*
2247 * there are still refs with lower seq numbers in the
2248 * process of being added. Don't run this ref yet.
2249 */
2250 list_del_init(&locked_ref->cluster);
2251 mutex_unlock(&locked_ref->mutex);
2252 locked_ref = NULL;
2253 delayed_refs->num_heads_ready++;
2254 spin_unlock(&delayed_refs->lock);
2255 cond_resched();
2256 spin_lock(&delayed_refs->lock);
2257 continue;
2258 }
2259
2260 /*
2261 * record the must insert reserved flag before we
2262 * drop the spin lock.
2263 */
2264 must_insert_reserved = locked_ref->must_insert_reserved;
2265 locked_ref->must_insert_reserved = 0;
2266
2267 extent_op = locked_ref->extent_op;
2268 locked_ref->extent_op = NULL;
2269
2270 if (!ref) {
2271 /* All delayed refs have been processed, go ahead
2272 * and send the head node to run_one_delayed_ref,
2273 * so that any accounting fixes can happen
2274 */
2275 ref = &locked_ref->node;
2276
2277 if (extent_op && must_insert_reserved) {
2278 kfree(extent_op);
2279 extent_op = NULL;
2280 }
2281
2282 if (extent_op) {
2283 spin_unlock(&delayed_refs->lock);
2284
2285 ret = run_delayed_extent_op(trans, root,
2286 ref, extent_op);
2287 BUG_ON(ret);
2288 kfree(extent_op);
2289
2290 goto next;
2291 }
2292
2293 list_del_init(&locked_ref->cluster);
2294 locked_ref = NULL;
2295 }
2296
2297 ref->in_tree = 0;
2298 rb_erase(&ref->rb_node, &delayed_refs->root);
2299 delayed_refs->num_entries--;
2300 /*
2301 * we modified num_entries, but as we're currently running
2302 * delayed refs, skip
2303 * wake_up(&delayed_refs->seq_wait);
2304 * here.
2305 */
2306 spin_unlock(&delayed_refs->lock);
2307
2308 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2309 must_insert_reserved);
2310 BUG_ON(ret);
2311
2312 btrfs_put_delayed_ref(ref);
2313 kfree(extent_op);
2314 count++;
2315 next:
2316 do_chunk_alloc(trans, root->fs_info->extent_root,
2317 2 * 1024 * 1024,
2318 btrfs_get_alloc_profile(root, 0),
2319 CHUNK_ALLOC_NO_FORCE);
2320 cond_resched();
2321 spin_lock(&delayed_refs->lock);
2322 }
2323 return count;
2324 }
2325
2326
2327 static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
2328 unsigned long num_refs)
2329 {
2330 struct list_head *first_seq = delayed_refs->seq_head.next;
2331
2332 spin_unlock(&delayed_refs->lock);
2333 pr_debug("waiting for more refs (num %ld, first %p)\n",
2334 num_refs, first_seq);
2335 wait_event(delayed_refs->seq_wait,
2336 num_refs != delayed_refs->num_entries ||
2337 delayed_refs->seq_head.next != first_seq);
2338 pr_debug("done waiting for more refs (num %ld, first %p)\n",
2339 delayed_refs->num_entries, delayed_refs->seq_head.next);
2340 spin_lock(&delayed_refs->lock);
2341 }
2342
2343 /*
2344 * this starts processing the delayed reference count updates and
2345 * extent insertions we have queued up so far. count can be
2346 * 0, which means to process everything in the tree at the start
2347 * of the run (but not newly added entries), or it can be some target
2348 * number you'd like to process.
2349 */
2350 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2351 struct btrfs_root *root, unsigned long count)
2352 {
2353 struct rb_node *node;
2354 struct btrfs_delayed_ref_root *delayed_refs;
2355 struct btrfs_delayed_ref_node *ref;
2356 struct list_head cluster;
2357 int ret;
2358 u64 delayed_start;
2359 int run_all = count == (unsigned long)-1;
2360 int run_most = 0;
2361 unsigned long num_refs = 0;
2362 int consider_waiting;
2363
2364 if (root == root->fs_info->extent_root)
2365 root = root->fs_info->tree_root;
2366
2367 do_chunk_alloc(trans, root->fs_info->extent_root,
2368 2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
2369 CHUNK_ALLOC_NO_FORCE);
2370
2371 delayed_refs = &trans->transaction->delayed_refs;
2372 INIT_LIST_HEAD(&cluster);
2373 again:
2374 consider_waiting = 0;
2375 spin_lock(&delayed_refs->lock);
2376 if (count == 0) {
2377 count = delayed_refs->num_entries * 2;
2378 run_most = 1;
2379 }
2380 while (1) {
2381 if (!(run_all || run_most) &&
2382 delayed_refs->num_heads_ready < 64)
2383 break;
2384
2385 /*
2386 * go find something we can process in the rbtree. We start at
2387 * the beginning of the tree, and then build a cluster
2388 * of refs to process starting at the first one we are able to
2389 * lock
2390 */
2391 delayed_start = delayed_refs->run_delayed_start;
2392 ret = btrfs_find_ref_cluster(trans, &cluster,
2393 delayed_refs->run_delayed_start);
2394 if (ret)
2395 break;
2396
2397 if (delayed_start >= delayed_refs->run_delayed_start) {
2398 if (consider_waiting == 0) {
2399 /*
2400 * btrfs_find_ref_cluster looped. let's do one
2401 * more cycle. if we don't run any delayed ref
2402 * during that cycle (because we can't because
2403 * all of them are blocked) and if the number of
2404 * refs doesn't change, we avoid busy waiting.
2405 */
2406 consider_waiting = 1;
2407 num_refs = delayed_refs->num_entries;
2408 } else {
2409 wait_for_more_refs(delayed_refs, num_refs);
2410 /*
2411 * after waiting, things have changed. we
2412 * dropped the lock and someone else might have
2413 * run some refs, built new clusters and so on.
2414 * therefore, we restart staleness detection.
2415 */
2416 consider_waiting = 0;
2417 }
2418 }
2419
2420 ret = run_clustered_refs(trans, root, &cluster);
2421 BUG_ON(ret < 0);
2422
2423 count -= min_t(unsigned long, ret, count);
2424
2425 if (count == 0)
2426 break;
2427
2428 if (ret || delayed_refs->run_delayed_start == 0) {
2429 /* refs were run, let's reset staleness detection */
2430 consider_waiting = 0;
2431 }
2432 }
2433
2434 if (run_all) {
2435 node = rb_first(&delayed_refs->root);
2436 if (!node)
2437 goto out;
2438 count = (unsigned long)-1;
2439
2440 while (node) {
2441 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2442 rb_node);
2443 if (btrfs_delayed_ref_is_head(ref)) {
2444 struct btrfs_delayed_ref_head *head;
2445
2446 head = btrfs_delayed_node_to_head(ref);
2447 atomic_inc(&ref->refs);
2448
2449 spin_unlock(&delayed_refs->lock);
2450 /*
2451 * Mutex was contended, block until it's
2452 * released and try again
2453 */
2454 mutex_lock(&head->mutex);
2455 mutex_unlock(&head->mutex);
2456
2457 btrfs_put_delayed_ref(ref);
2458 cond_resched();
2459 goto again;
2460 }
2461 node = rb_next(node);
2462 }
2463 spin_unlock(&delayed_refs->lock);
2464 schedule_timeout(1);
2465 goto again;
2466 }
2467 out:
2468 spin_unlock(&delayed_refs->lock);
2469 return 0;
2470 }
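
/*
 * Usage sketch (editor's addition): callers either bound the work done from
 * a write path or flush everything at commit time, e.g.:
 *
 *	btrfs_run_delayed_refs(trans, root, 64);                // bounded
 *	btrfs_run_delayed_refs(trans, root, (unsigned long)-1); // run_all
 *
 * The (unsigned long)-1 form is the run_all case handled above, which keeps
 * going back around until no head refs remain in the rbtree.
 */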
2471
2472 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2473 struct btrfs_root *root,
2474 u64 bytenr, u64 num_bytes, u64 flags,
2475 int is_data)
2476 {
2477 struct btrfs_delayed_extent_op *extent_op;
2478 int ret;
2479
2480 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2481 if (!extent_op)
2482 return -ENOMEM;
2483
2484 extent_op->flags_to_set = flags;
2485 extent_op->update_flags = 1;
2486 extent_op->update_key = 0;
2487 extent_op->is_data = is_data ? 1 : 0;
2488
2489 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2490 num_bytes, extent_op);
2491 if (ret)
2492 kfree(extent_op);
2493 return ret;
2494 }
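
/*
 * Editor's sketch: this queues a delayed extent op instead of editing the
 * extent item in place.  For instance, marking a tree block with a full
 * backref might look like:
 *
 *	ret = btrfs_set_disk_extent_flags(trans, root, buf->start, buf->len,
 *					  BTRFS_BLOCK_FLAG_FULL_BACKREF, 0);
 *
 * The flag is applied later by run_delayed_extent_op() above.
 */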
2495
2496 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2497 struct btrfs_root *root,
2498 struct btrfs_path *path,
2499 u64 objectid, u64 offset, u64 bytenr)
2500 {
2501 struct btrfs_delayed_ref_head *head;
2502 struct btrfs_delayed_ref_node *ref;
2503 struct btrfs_delayed_data_ref *data_ref;
2504 struct btrfs_delayed_ref_root *delayed_refs;
2505 struct rb_node *node;
2506 int ret = 0;
2507
2508 ret = -ENOENT;
2509 delayed_refs = &trans->transaction->delayed_refs;
2510 spin_lock(&delayed_refs->lock);
2511 head = btrfs_find_delayed_ref_head(trans, bytenr);
2512 if (!head)
2513 goto out;
2514
2515 if (!mutex_trylock(&head->mutex)) {
2516 atomic_inc(&head->node.refs);
2517 spin_unlock(&delayed_refs->lock);
2518
2519 btrfs_release_path(path);
2520
2521 /*
2522 * Mutex was contended, block until it's released and let
2523 * caller try again
2524 */
2525 mutex_lock(&head->mutex);
2526 mutex_unlock(&head->mutex);
2527 btrfs_put_delayed_ref(&head->node);
2528 return -EAGAIN;
2529 }
2530
2531 node = rb_prev(&head->node.rb_node);
2532 if (!node)
2533 goto out_unlock;
2534
2535 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2536
2537 if (ref->bytenr != bytenr)
2538 goto out_unlock;
2539
2540 ret = 1;
2541 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2542 goto out_unlock;
2543
2544 data_ref = btrfs_delayed_node_to_data_ref(ref);
2545
2546 node = rb_prev(node);
2547 if (node) {
2548 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2549 if (ref->bytenr == bytenr)
2550 goto out_unlock;
2551 }
2552
2553 if (data_ref->root != root->root_key.objectid ||
2554 data_ref->objectid != objectid || data_ref->offset != offset)
2555 goto out_unlock;
2556
2557 ret = 0;
2558 out_unlock:
2559 mutex_unlock(&head->mutex);
2560 out:
2561 spin_unlock(&delayed_refs->lock);
2562 return ret;
2563 }
2564
2565 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2566 struct btrfs_root *root,
2567 struct btrfs_path *path,
2568 u64 objectid, u64 offset, u64 bytenr)
2569 {
2570 struct btrfs_root *extent_root = root->fs_info->extent_root;
2571 struct extent_buffer *leaf;
2572 struct btrfs_extent_data_ref *ref;
2573 struct btrfs_extent_inline_ref *iref;
2574 struct btrfs_extent_item *ei;
2575 struct btrfs_key key;
2576 u32 item_size;
2577 int ret;
2578
2579 key.objectid = bytenr;
2580 key.offset = (u64)-1;
2581 key.type = BTRFS_EXTENT_ITEM_KEY;
2582
2583 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2584 if (ret < 0)
2585 goto out;
2586 BUG_ON(ret == 0);
2587
2588 ret = -ENOENT;
2589 if (path->slots[0] == 0)
2590 goto out;
2591
2592 path->slots[0]--;
2593 leaf = path->nodes[0];
2594 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2595
2596 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2597 goto out;
2598
2599 ret = 1;
2600 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2601 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2602 if (item_size < sizeof(*ei)) {
2603 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2604 goto out;
2605 }
2606 #endif
2607 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2608
2609 if (item_size != sizeof(*ei) +
2610 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2611 goto out;
2612
2613 if (btrfs_extent_generation(leaf, ei) <=
2614 btrfs_root_last_snapshot(&root->root_item))
2615 goto out;
2616
2617 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2618 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2619 BTRFS_EXTENT_DATA_REF_KEY)
2620 goto out;
2621
2622 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2623 if (btrfs_extent_refs(leaf, ei) !=
2624 btrfs_extent_data_ref_count(leaf, ref) ||
2625 btrfs_extent_data_ref_root(leaf, ref) !=
2626 root->root_key.objectid ||
2627 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2628 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2629 goto out;
2630
2631 ret = 0;
2632 out:
2633 return ret;
2634 }
2635
2636 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2637 struct btrfs_root *root,
2638 u64 objectid, u64 offset, u64 bytenr)
2639 {
2640 struct btrfs_path *path;
2641 int ret;
2642 int ret2;
2643
2644 path = btrfs_alloc_path();
2645 if (!path)
2646 return -ENOMEM;
2647
2648 do {
2649 ret = check_committed_ref(trans, root, path, objectid,
2650 offset, bytenr);
2651 if (ret && ret != -ENOENT)
2652 goto out;
2653
2654 ret2 = check_delayed_ref(trans, root, path, objectid,
2655 offset, bytenr);
2656 } while (ret2 == -EAGAIN);
2657
2658 if (ret2 && ret2 != -ENOENT) {
2659 ret = ret2;
2660 goto out;
2661 }
2662
2663 if (ret != -ENOENT || ret2 != -ENOENT)
2664 ret = 0;
2665 out:
2666 btrfs_free_path(path);
2667 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2668 WARN_ON(ret > 0);
2669 return ret;
2670 }
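
/*
 * Editor's note on the return convention: 0 means no cross reference can
 * exist, so a nodatacow writer may safely overwrite in place; any other
 * value means the extent may be shared and the caller must fall back to
 * COW, roughly:
 *
 *	if (btrfs_cross_ref_exist(trans, root, ino, offset, disk_bytenr))
 *		... treat the extent as shared and COW it ...
 */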
2671
2672 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2673 struct btrfs_root *root,
2674 struct extent_buffer *buf,
2675 int full_backref, int inc, int for_cow)
2676 {
2677 u64 bytenr;
2678 u64 num_bytes;
2679 u64 parent;
2680 u64 ref_root;
2681 u32 nritems;
2682 struct btrfs_key key;
2683 struct btrfs_file_extent_item *fi;
2684 int i;
2685 int level;
2686 int ret = 0;
2687 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2688 u64, u64, u64, u64, u64, u64, int);
2689
2690 ref_root = btrfs_header_owner(buf);
2691 nritems = btrfs_header_nritems(buf);
2692 level = btrfs_header_level(buf);
2693
2694 if (!root->ref_cows && level == 0)
2695 return 0;
2696
2697 if (inc)
2698 process_func = btrfs_inc_extent_ref;
2699 else
2700 process_func = btrfs_free_extent;
2701
2702 if (full_backref)
2703 parent = buf->start;
2704 else
2705 parent = 0;
2706
2707 for (i = 0; i < nritems; i++) {
2708 if (level == 0) {
2709 btrfs_item_key_to_cpu(buf, &key, i);
2710 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2711 continue;
2712 fi = btrfs_item_ptr(buf, i,
2713 struct btrfs_file_extent_item);
2714 if (btrfs_file_extent_type(buf, fi) ==
2715 BTRFS_FILE_EXTENT_INLINE)
2716 continue;
2717 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2718 if (bytenr == 0)
2719 continue;
2720
2721 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2722 key.offset -= btrfs_file_extent_offset(buf, fi);
2723 ret = process_func(trans, root, bytenr, num_bytes,
2724 parent, ref_root, key.objectid,
2725 key.offset, for_cow);
2726 if (ret)
2727 goto fail;
2728 } else {
2729 bytenr = btrfs_node_blockptr(buf, i);
2730 num_bytes = btrfs_level_size(root, level - 1);
2731 ret = process_func(trans, root, bytenr, num_bytes,
2732 parent, ref_root, level - 1, 0,
2733 for_cow);
2734 if (ret)
2735 goto fail;
2736 }
2737 }
2738 return 0;
2739 fail:
2740 BUG();
2741 return ret;
2742 }
2743
2744 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2745 struct extent_buffer *buf, int full_backref, int for_cow)
2746 {
2747 return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
2748 }
2749
2750 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2751 struct extent_buffer *buf, int full_backref, int for_cow)
2752 {
2753 return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
2754 }
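
/*
 * Editor's sketch of how the inc/dec pair is used: when a tree block is
 * COWed, the new copy takes a reference on everything it points to and the
 * old copy drops its references, roughly:
 *
 *	btrfs_inc_ref(trans, root, cow, 1, 1);	// new block
 *	btrfs_dec_ref(trans, root, buf, 1, 1);	// old block
 *
 * The full_backref and for_cow values shown are illustrative; the real
 * logic in ctree.c picks them per block.
 */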
2755
2756 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2757 struct btrfs_root *root,
2758 struct btrfs_path *path,
2759 struct btrfs_block_group_cache *cache)
2760 {
2761 int ret;
2762 struct btrfs_root *extent_root = root->fs_info->extent_root;
2763 unsigned long bi;
2764 struct extent_buffer *leaf;
2765
2766 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2767 if (ret < 0)
2768 goto fail;
2769 BUG_ON(ret);
2770
2771 leaf = path->nodes[0];
2772 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2773 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2774 btrfs_mark_buffer_dirty(leaf);
2775 btrfs_release_path(path);
2776 fail:
2777 if (ret)
2778 return ret;
2779 return 0;
2780
2781 }
2782
2783 static struct btrfs_block_group_cache *
2784 next_block_group(struct btrfs_root *root,
2785 struct btrfs_block_group_cache *cache)
2786 {
2787 struct rb_node *node;
2788 spin_lock(&root->fs_info->block_group_cache_lock);
2789 node = rb_next(&cache->cache_node);
2790 btrfs_put_block_group(cache);
2791 if (node) {
2792 cache = rb_entry(node, struct btrfs_block_group_cache,
2793 cache_node);
2794 btrfs_get_block_group(cache);
2795 } else
2796 cache = NULL;
2797 spin_unlock(&root->fs_info->block_group_cache_lock);
2798 return cache;
2799 }
2800
2801 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2802 struct btrfs_trans_handle *trans,
2803 struct btrfs_path *path)
2804 {
2805 struct btrfs_root *root = block_group->fs_info->tree_root;
2806 struct inode *inode = NULL;
2807 u64 alloc_hint = 0;
2808 int dcs = BTRFS_DC_ERROR;
2809 int num_pages = 0;
2810 int retries = 0;
2811 int ret = 0;
2812
2813 /*
2814 * If this block group is smaller than 100 megs, don't bother
2815 * caching it.
2816 */
2817 if (block_group->key.offset < (100 * 1024 * 1024)) {
2818 spin_lock(&block_group->lock);
2819 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2820 spin_unlock(&block_group->lock);
2821 return 0;
2822 }
2823
2824 again:
2825 inode = lookup_free_space_inode(root, block_group, path);
2826 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2827 ret = PTR_ERR(inode);
2828 btrfs_release_path(path);
2829 goto out;
2830 }
2831
2832 if (IS_ERR(inode)) {
2833 BUG_ON(retries);
2834 retries++;
2835
2836 if (block_group->ro)
2837 goto out_free;
2838
2839 ret = create_free_space_inode(root, trans, block_group, path);
2840 if (ret)
2841 goto out_free;
2842 goto again;
2843 }
2844
2845 /* We've already setup this transaction, go ahead and exit */
2846 if (block_group->cache_generation == trans->transid &&
2847 i_size_read(inode)) {
2848 dcs = BTRFS_DC_SETUP;
2849 goto out_put;
2850 }
2851
2852 /*
2853 * We want to set the generation to 0, that way if anything goes wrong
2854 * from here on out we know not to trust this cache when we load up next
2855 * time.
2856 */
2857 BTRFS_I(inode)->generation = 0;
2858 ret = btrfs_update_inode(trans, root, inode);
2859 WARN_ON(ret);
2860
2861 if (i_size_read(inode) > 0) {
2862 ret = btrfs_truncate_free_space_cache(root, trans, path,
2863 inode);
2864 if (ret)
2865 goto out_put;
2866 }
2867
2868 spin_lock(&block_group->lock);
2869 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2870 /* We're not cached, don't bother trying to write stuff out */
2871 dcs = BTRFS_DC_WRITTEN;
2872 spin_unlock(&block_group->lock);
2873 goto out_put;
2874 }
2875 spin_unlock(&block_group->lock);
2876
2877 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2878 if (!num_pages)
2879 num_pages = 1;
2880
2881 /*
2882 * Just to make absolutely sure we have enough space, we're going to
2883 * preallocate 16 pages worth of space per gigabyte of block group. In
2884 * practice we ought to use at most 8, but we need extra space so we can
2885 * add our header and have a terminator between the extents and the
2886 * bitmaps.
2887 */
2888 num_pages *= 16;
2889 num_pages *= PAGE_CACHE_SIZE;
2890
2891 ret = btrfs_check_data_free_space(inode, num_pages);
2892 if (ret)
2893 goto out_put;
2894
2895 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2896 num_pages, num_pages,
2897 &alloc_hint);
2898 if (!ret)
2899 dcs = BTRFS_DC_SETUP;
2900 btrfs_free_reserved_data_space(inode, num_pages);
2901
2902 out_put:
2903 iput(inode);
2904 out_free:
2905 btrfs_release_path(path);
2906 out:
2907 spin_lock(&block_group->lock);
2908 if (!ret && dcs == BTRFS_DC_SETUP)
2909 block_group->cache_generation = trans->transid;
2910 block_group->disk_cache_state = dcs;
2911 spin_unlock(&block_group->lock);
2912
2913 return ret;
2914 }
2915
2916 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2917 struct btrfs_root *root)
2918 {
2919 struct btrfs_block_group_cache *cache;
2920 int err = 0;
2921 struct btrfs_path *path;
2922 u64 last = 0;
2923
2924 path = btrfs_alloc_path();
2925 if (!path)
2926 return -ENOMEM;
2927
2928 again:
2929 while (1) {
2930 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2931 while (cache) {
2932 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2933 break;
2934 cache = next_block_group(root, cache);
2935 }
2936 if (!cache) {
2937 if (last == 0)
2938 break;
2939 last = 0;
2940 continue;
2941 }
2942 err = cache_save_setup(cache, trans, path);
2943 last = cache->key.objectid + cache->key.offset;
2944 btrfs_put_block_group(cache);
2945 }
2946
2947 while (1) {
2948 if (last == 0) {
2949 err = btrfs_run_delayed_refs(trans, root,
2950 (unsigned long)-1);
2951 BUG_ON(err);
2952 }
2953
2954 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2955 while (cache) {
2956 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2957 btrfs_put_block_group(cache);
2958 goto again;
2959 }
2960
2961 if (cache->dirty)
2962 break;
2963 cache = next_block_group(root, cache);
2964 }
2965 if (!cache) {
2966 if (last == 0)
2967 break;
2968 last = 0;
2969 continue;
2970 }
2971
2972 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2973 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2974 cache->dirty = 0;
2975 last = cache->key.objectid + cache->key.offset;
2976
2977 err = write_one_cache_group(trans, root, path, cache);
2978 BUG_ON(err);
2979 btrfs_put_block_group(cache);
2980 }
2981
2982 while (1) {
2983 /*
2984 * I don't think this is needed since we're just marking our
2985 * preallocated extent as written, but it can't hurt just in
2986 * case.
2987 */
2988 if (last == 0) {
2989 err = btrfs_run_delayed_refs(trans, root,
2990 (unsigned long)-1);
2991 BUG_ON(err);
2992 }
2993
2994 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2995 while (cache) {
2996 /*
2997 * Really this shouldn't happen, but it could if we
2998 * couldn't write the entire preallocated extent and
2999 * splitting the extent resulted in a new block.
3000 */
3001 if (cache->dirty) {
3002 btrfs_put_block_group(cache);
3003 goto again;
3004 }
3005 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3006 break;
3007 cache = next_block_group(root, cache);
3008 }
3009 if (!cache) {
3010 if (last == 0)
3011 break;
3012 last = 0;
3013 continue;
3014 }
3015
3016 btrfs_write_out_cache(root, trans, cache, path);
3017
3018 /*
3019 * If we didn't have an error then the cache state is still
3020 * NEED_WRITE, so we can set it to WRITTEN.
3021 */
3022 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
3023 cache->disk_cache_state = BTRFS_DC_WRITTEN;
3024 last = cache->key.objectid + cache->key.offset;
3025 btrfs_put_block_group(cache);
3026 }
3027
3028 btrfs_free_path(path);
3029 return 0;
3030 }
3031
3032 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3033 {
3034 struct btrfs_block_group_cache *block_group;
3035 int readonly = 0;
3036
3037 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3038 if (!block_group || block_group->ro)
3039 readonly = 1;
3040 if (block_group)
3041 btrfs_put_block_group(block_group);
3042 return readonly;
3043 }
3044
3045 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3046 u64 total_bytes, u64 bytes_used,
3047 struct btrfs_space_info **space_info)
3048 {
3049 struct btrfs_space_info *found;
3050 int i;
3051 int factor;
3052
3053 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3054 BTRFS_BLOCK_GROUP_RAID10))
3055 factor = 2;
3056 else
3057 factor = 1;
3058
3059 found = __find_space_info(info, flags);
3060 if (found) {
3061 spin_lock(&found->lock);
3062 found->total_bytes += total_bytes;
3063 found->disk_total += total_bytes * factor;
3064 found->bytes_used += bytes_used;
3065 found->disk_used += bytes_used * factor;
3066 found->full = 0;
3067 spin_unlock(&found->lock);
3068 *space_info = found;
3069 return 0;
3070 }
3071 found = kzalloc(sizeof(*found), GFP_NOFS);
3072 if (!found)
3073 return -ENOMEM;
3074
3075 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3076 INIT_LIST_HEAD(&found->block_groups[i]);
3077 init_rwsem(&found->groups_sem);
3078 spin_lock_init(&found->lock);
3079 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3080 found->total_bytes = total_bytes;
3081 found->disk_total = total_bytes * factor;
3082 found->bytes_used = bytes_used;
3083 found->disk_used = bytes_used * factor;
3084 found->bytes_pinned = 0;
3085 found->bytes_reserved = 0;
3086 found->bytes_readonly = 0;
3087 found->bytes_may_use = 0;
3088 found->full = 0;
3089 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3090 found->chunk_alloc = 0;
3091 found->flush = 0;
3092 init_waitqueue_head(&found->wait);
3093 *space_info = found;
3094 list_add_rcu(&found->list, &info->space_info);
3095 return 0;
3096 }
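
/*
 * Worked example (editor's note): the factor above is 2 for DUP/RAID1/
 * RAID10 because each logical byte occupies two bytes on disk.  Adding a
 * 1GiB RAID1 block group therefore bumps total_bytes by 1GiB but
 * disk_total by 2GiB; bytes_used and disk_used scale the same way.
 */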
3097
3098 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3099 {
3100 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
3101
3102 /* chunk -> extended profile */
3103 if (extra_flags == 0)
3104 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3105
3106 if (flags & BTRFS_BLOCK_GROUP_DATA)
3107 fs_info->avail_data_alloc_bits |= extra_flags;
3108 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3109 fs_info->avail_metadata_alloc_bits |= extra_flags;
3110 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3111 fs_info->avail_system_alloc_bits |= extra_flags;
3112 }
3113
3114 /*
3115 * @flags: available profiles in extended format (see ctree.h)
3116 *
3117 * Returns reduced profile in chunk format. If profile changing is in
3118 * progress (either running or paused) picks the target profile (if it's
3119 * already available), otherwise falls back to plain reducing.
3120 */
3121 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3122 {
3123 /*
3124 * we add in the count of missing devices because we want
3125 * to make sure that any RAID levels on a degraded FS
3126 * continue to be honored.
3127 */
3128 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3129 root->fs_info->fs_devices->missing_devices;
3130
3131 /* pick restriper's target profile if it's available */
3132 spin_lock(&root->fs_info->balance_lock);
3133 if (root->fs_info->balance_ctl) {
3134 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
3135 u64 tgt = 0;
3136
3137 if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
3138 (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3139 (flags & bctl->data.target)) {
3140 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3141 } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
3142 (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3143 (flags & bctl->sys.target)) {
3144 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3145 } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
3146 (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3147 (flags & bctl->meta.target)) {
3148 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3149 }
3150
3151 if (tgt) {
3152 spin_unlock(&root->fs_info->balance_lock);
3153 flags = tgt;
3154 goto out;
3155 }
3156 }
3157 spin_unlock(&root->fs_info->balance_lock);
3158
3159 if (num_devices == 1)
3160 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3161 if (num_devices < 4)
3162 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3163
3164 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3165 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3166 BTRFS_BLOCK_GROUP_RAID10))) {
3167 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3168 }
3169
3170 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3171 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3172 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3173 }
3174
3175 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3176 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3177 (flags & BTRFS_BLOCK_GROUP_RAID10) |
3178 (flags & BTRFS_BLOCK_GROUP_DUP))) {
3179 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3180 }
3181
3182 out:
3183 /* extended -> chunk profile */
3184 flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3185 return flags;
3186 }
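
/*
 * Worked example (editor's sketch, ignoring the restriper target): with
 * two rw devices and flags containing RAID10 | RAID1 | DUP, num_devices < 4
 * strips RAID10, DUP loses to RAID1, and the function returns plain RAID1
 * in chunk format.
 */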
3187
3188 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3189 {
3190 if (flags & BTRFS_BLOCK_GROUP_DATA)
3191 flags |= root->fs_info->avail_data_alloc_bits;
3192 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3193 flags |= root->fs_info->avail_system_alloc_bits;
3194 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3195 flags |= root->fs_info->avail_metadata_alloc_bits;
3196
3197 return btrfs_reduce_alloc_profile(root, flags);
3198 }
3199
3200 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3201 {
3202 u64 flags;
3203
3204 if (data)
3205 flags = BTRFS_BLOCK_GROUP_DATA;
3206 else if (root == root->fs_info->chunk_root)
3207 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3208 else
3209 flags = BTRFS_BLOCK_GROUP_METADATA;
3210
3211 return get_alloc_profile(root, flags);
3212 }
3213
3214 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3215 {
3216 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3217 BTRFS_BLOCK_GROUP_DATA);
3218 }
3219
3220 /*
3221 * This will check the space that the inode allocates from to make sure we have
3222 * enough space for bytes.
3223 */
3224 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3225 {
3226 struct btrfs_space_info *data_sinfo;
3227 struct btrfs_root *root = BTRFS_I(inode)->root;
3228 u64 used;
3229 int ret = 0, committed = 0, alloc_chunk = 1;
3230
3231 /* make sure bytes are sectorsize aligned */
3232 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
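	/*
	 * Editor's note: the expression above rounds up to the next
	 * sectorsize multiple, e.g. with 4096 byte sectors a request for
	 * 5000 bytes becomes (5000 + 4095) & ~4095 == 8192.
	 */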
3233
3234 if (root == root->fs_info->tree_root ||
3235 BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
3236 alloc_chunk = 0;
3237 committed = 1;
3238 }
3239
3240 data_sinfo = BTRFS_I(inode)->space_info;
3241 if (!data_sinfo)
3242 goto alloc;
3243
3244 again:
3245 /* make sure we have enough space to handle the data first */
3246 spin_lock(&data_sinfo->lock);
3247 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3248 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3249 data_sinfo->bytes_may_use;
3250
3251 if (used + bytes > data_sinfo->total_bytes) {
3252 struct btrfs_trans_handle *trans;
3253
3254 /*
3255 * if we don't have enough free bytes in this space then we need
3256 * to alloc a new chunk.
3257 */
3258 if (!data_sinfo->full && alloc_chunk) {
3259 u64 alloc_target;
3260
3261 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
3262 spin_unlock(&data_sinfo->lock);
3263 alloc:
3264 alloc_target = btrfs_get_alloc_profile(root, 1);
3265 trans = btrfs_join_transaction(root);
3266 if (IS_ERR(trans))
3267 return PTR_ERR(trans);
3268
3269 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3270 bytes + 2 * 1024 * 1024,
3271 alloc_target,
3272 CHUNK_ALLOC_NO_FORCE);
3273 btrfs_end_transaction(trans, root);
3274 if (ret < 0) {
3275 if (ret != -ENOSPC)
3276 return ret;
3277 else
3278 goto commit_trans;
3279 }
3280
3281 if (!data_sinfo) {
3282 btrfs_set_inode_space_info(root, inode);
3283 data_sinfo = BTRFS_I(inode)->space_info;
3284 }
3285 goto again;
3286 }
3287
3288 /*
3289 * If we have less pinned bytes than we want to allocate then
3290 * don't bother committing the transaction, it won't help us.
3291 */
3292 if (data_sinfo->bytes_pinned < bytes)
3293 committed = 1;
3294 spin_unlock(&data_sinfo->lock);
3295
3296 /* commit the current transaction and try again */
3297 commit_trans:
3298 if (!committed &&
3299 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3300 committed = 1;
3301 trans = btrfs_join_transaction(root);
3302 if (IS_ERR(trans))
3303 return PTR_ERR(trans);
3304 ret = btrfs_commit_transaction(trans, root);
3305 if (ret)
3306 return ret;
3307 goto again;
3308 }
3309
3310 return -ENOSPC;
3311 }
3312 data_sinfo->bytes_may_use += bytes;
3313 spin_unlock(&data_sinfo->lock);
3314
3315 return 0;
3316 }
3317
3318 /*
3319 * Called if we need to clear a data reservation for this inode.
3320 */
3321 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3322 {
3323 struct btrfs_root *root = BTRFS_I(inode)->root;
3324 struct btrfs_space_info *data_sinfo;
3325
3326 /* make sure bytes are sectorsize aligned */
3327 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3328
3329 data_sinfo = BTRFS_I(inode)->space_info;
3330 spin_lock(&data_sinfo->lock);
3331 data_sinfo->bytes_may_use -= bytes;
3332 spin_unlock(&data_sinfo->lock);
3333 }
3334
3335 static void force_metadata_allocation(struct btrfs_fs_info *info)
3336 {
3337 struct list_head *head = &info->space_info;
3338 struct btrfs_space_info *found;
3339
3340 rcu_read_lock();
3341 list_for_each_entry_rcu(found, head, list) {
3342 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3343 found->force_alloc = CHUNK_ALLOC_FORCE;
3344 }
3345 rcu_read_unlock();
3346 }
3347
3348 static int should_alloc_chunk(struct btrfs_root *root,
3349 struct btrfs_space_info *sinfo, u64 alloc_bytes,
3350 int force)
3351 {
3352 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3353 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3354 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
3355 u64 thresh;
3356
3357 if (force == CHUNK_ALLOC_FORCE)
3358 return 1;
3359
3360 /*
3361 * We need to take into account the global rsv because for all intents
3362 * and purposes it's used space. Don't worry about locking the
3363 * global_rsv, it doesn't change except when the transaction commits.
3364 */
3365 num_allocated += global_rsv->size;
3366
3367 /*
3368 * in limited mode, we want to have some free space up to
3369 * about 1% of the FS size.
3370 */
3371 if (force == CHUNK_ALLOC_LIMITED) {
3372 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3373 thresh = max_t(u64, 64 * 1024 * 1024,
3374 div_factor_fine(thresh, 1));
3375
3376 if (num_bytes - num_allocated < thresh)
3377 return 1;
3378 }
3379 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
3380
3381 /* 256MB or 2% of the FS */
3382 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
3383
3384 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
3385 return 0;
3386 return 1;
3387 }
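
/*
 * Worked example (editor's note): on a 100GiB FS the second thresh is
 * max(256MiB, 2% of total) == 2GiB.  A space_info with num_bytes == 10GiB
 * and bytes_used == 7GiB returns 0 (10GiB > 2GiB and 7GiB is under 80% of
 * num_bytes); once bytes_used crosses 8GiB we return 1 and allocate.
 */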
3388
3389 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3390 struct btrfs_root *extent_root, u64 alloc_bytes,
3391 u64 flags, int force)
3392 {
3393 struct btrfs_space_info *space_info;
3394 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3395 int wait_for_alloc = 0;
3396 int ret = 0;
3397
3398 BUG_ON(!profile_is_valid(flags, 0));
3399
3400 space_info = __find_space_info(extent_root->fs_info, flags);
3401 if (!space_info) {
3402 ret = update_space_info(extent_root->fs_info, flags,
3403 0, 0, &space_info);
3404 BUG_ON(ret);
3405 }
3406 BUG_ON(!space_info);
3407
3408 again:
3409 spin_lock(&space_info->lock);
3410 if (space_info->force_alloc)
3411 force = space_info->force_alloc;
3412 if (space_info->full) {
3413 spin_unlock(&space_info->lock);
3414 return 0;
3415 }
3416
3417 if (!should_alloc_chunk(extent_root, space_info, alloc_bytes, force)) {
3418 spin_unlock(&space_info->lock);
3419 return 0;
3420 } else if (space_info->chunk_alloc) {
3421 wait_for_alloc = 1;
3422 } else {
3423 space_info->chunk_alloc = 1;
3424 }
3425
3426 spin_unlock(&space_info->lock);
3427
3428 mutex_lock(&fs_info->chunk_mutex);
3429
3430 /*
3431 * The chunk_mutex is held throughout the entirety of a chunk
3432 * allocation, so once we've acquired the chunk_mutex we know that the
3433 * other guy is done and we need to recheck and see if we should
3434 * allocate.
3435 */
3436 if (wait_for_alloc) {
3437 mutex_unlock(&fs_info->chunk_mutex);
3438 wait_for_alloc = 0;
3439 goto again;
3440 }
3441
3442 /*
3443 * If we have mixed data/metadata chunks we want to make sure we keep
3444 * allocating mixed chunks instead of individual chunks.
3445 */
3446 if (btrfs_mixed_space_info(space_info))
3447 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3448
3449 /*
3450 * if we're doing a data chunk, go ahead and make sure that
3451 * we keep a reasonable number of metadata chunks allocated in the
3452 * FS as well.
3453 */
3454 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3455 fs_info->data_chunk_allocations++;
3456 if (!(fs_info->data_chunk_allocations %
3457 fs_info->metadata_ratio))
3458 force_metadata_allocation(fs_info);
3459 }
3460
3461 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3462 if (ret < 0 && ret != -ENOSPC)
3463 goto out;
3464
3465 spin_lock(&space_info->lock);
3466 if (ret)
3467 space_info->full = 1;
3468 else
3469 ret = 1;
3470
3471 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3472 space_info->chunk_alloc = 0;
3473 spin_unlock(&space_info->lock);
3474 out:
3475 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3476 return ret;
3477 }
3478
3479 /*
3480 * shrink metadata reservation for delalloc
3481 */
3482 static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
3483 bool wait_ordered)
3484 {
3485 struct btrfs_block_rsv *block_rsv;
3486 struct btrfs_space_info *space_info;
3487 struct btrfs_trans_handle *trans;
3488 u64 reserved;
3489 u64 max_reclaim;
3490 u64 reclaimed = 0;
3491 long time_left;
3492 unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3493 int loops = 0;
3494 unsigned long progress;
3495
3496 trans = (struct btrfs_trans_handle *)current->journal_info;
3497 block_rsv = &root->fs_info->delalloc_block_rsv;
3498 space_info = block_rsv->space_info;
3499
3500 smp_mb();
3501 reserved = space_info->bytes_may_use;
3502 progress = space_info->reservation_progress;
3503
3504 if (reserved == 0)
3505 return 0;
3506
3507 smp_mb();
3508 if (root->fs_info->delalloc_bytes == 0) {
3509 if (trans)
3510 return 0;
3511 btrfs_wait_ordered_extents(root, 0, 0);
3512 return 0;
3513 }
3514
3515 max_reclaim = min(reserved, to_reclaim);
3516 nr_pages = max_t(unsigned long, nr_pages,
3517 max_reclaim >> PAGE_CACHE_SHIFT);
3518 while (loops < 1024) {
3519 /* have the flusher threads jump in and do some IO */
3520 smp_mb();
3521 nr_pages = min_t(unsigned long, nr_pages,
3522 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3523 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
3524 WB_REASON_FS_FREE_SPACE);
3525
3526 spin_lock(&space_info->lock);
3527 if (reserved > space_info->bytes_may_use)
3528 reclaimed += reserved - space_info->bytes_may_use;
3529 reserved = space_info->bytes_may_use;
3530 spin_unlock(&space_info->lock);
3531
3532 loops++;
3533
3534 if (reserved == 0 || reclaimed >= max_reclaim)
3535 break;
3536
3537 if (trans && trans->transaction->blocked)
3538 return -EAGAIN;
3539
3540 if (wait_ordered && !trans) {
3541 btrfs_wait_ordered_extents(root, 0, 0);
3542 } else {
3543 time_left = schedule_timeout_interruptible(1);
3544
3545 /* We were interrupted, exit */
3546 if (time_left)
3547 break;
3548 }
3549
3550 /* we've kicked the IO a few times, if anything has been freed,
3551 * exit. There is no sense in looping here for a long time
3552 * when we really need to commit the transaction, or there are
3553 * just too many writers without enough free space
3554 */
3555
3556 if (loops > 3) {
3557 smp_mb();
3558 if (progress != space_info->reservation_progress)
3559 break;
3560 }
3561
3562 }
3563
3564 return reclaimed >= to_reclaim;
3565 }
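
/*
 * Editor's note on the return value: 1 means at least to_reclaim bytes of
 * delalloc reservations were given back, 0 means the loop gave up first,
 * and -EAGAIN means a blocked transaction forced an early exit.
 */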
3566
3567 /**
3568 * may_commit_transaction - possibly commit the transaction if it's ok to
3569 * @root - the root we're allocating for
3570 * @bytes - the number of bytes we want to reserve
3571 * @force - force the commit
3572 *
3573 * This will check to make sure that committing the transaction will actually
3574 * get us somewhere and then commit the transaction if it does. Otherwise it
3575 * will return -ENOSPC.
3576 */
3577 static int may_commit_transaction(struct btrfs_root *root,
3578 struct btrfs_space_info *space_info,
3579 u64 bytes, int force)
3580 {
3581 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
3582 struct btrfs_trans_handle *trans;
3583
3584 trans = (struct btrfs_trans_handle *)current->journal_info;
3585 if (trans)
3586 return -EAGAIN;
3587
3588 if (force)
3589 goto commit;
3590
3591 /* See if there is enough pinned space to make this reservation */
3592 spin_lock(&space_info->lock);
3593 if (space_info->bytes_pinned >= bytes) {
3594 spin_unlock(&space_info->lock);
3595 goto commit;
3596 }
3597 spin_unlock(&space_info->lock);
3598
3599 /*
3600 * See if there is some space in the delayed insertion reservation for
3601 * this reservation.
3602 */
3603 if (space_info != delayed_rsv->space_info)
3604 return -ENOSPC;
3605
3606 spin_lock(&delayed_rsv->lock);
3607 if (delayed_rsv->size < bytes) {
3608 spin_unlock(&delayed_rsv->lock);
3609 return -ENOSPC;
3610 }
3611 spin_unlock(&delayed_rsv->lock);
3612
3613 commit:
3614 trans = btrfs_join_transaction(root);
3615 if (IS_ERR(trans))
3616 return -ENOSPC;
3617
3618 return btrfs_commit_transaction(trans, root);
3619 }
3620
3621 /**
3622 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
3623 * @root - the root we're allocating for
3624 * @block_rsv - the block_rsv we're allocating for
3625 * @orig_bytes - the number of bytes we want
3626 * @flush - whether or not we can flush to make our reservation
3627 *
3628 * This will reserve orig_bytes number of bytes from the space info associated
3629 * with the block_rsv. If there is not enough space it will make an attempt to
3630 * flush out space to make room. It will do this by flushing delalloc if
3631 * possible or committing the transaction. If flush is 0 then no attempts to
3632 * regain reservations will be made and this will fail if there is not enough
3633 * space already.
3634 */
3635 static int reserve_metadata_bytes(struct btrfs_root *root,
3636 struct btrfs_block_rsv *block_rsv,
3637 u64 orig_bytes, int flush)
3638 {
3639 struct btrfs_space_info *space_info = block_rsv->space_info;
3640 u64 used;
3641 u64 num_bytes = orig_bytes;
3642 int retries = 0;
3643 int ret = 0;
3644 bool committed = false;
3645 bool flushing = false;
3646 bool wait_ordered = false;
3647
3648 again:
3649 ret = 0;
3650 spin_lock(&space_info->lock);
3651 /*
3652 * We only want to wait if somebody other than us is flushing and we are
3653 * actually allowed to flush.
3654 */
3655 while (flush && !flushing && space_info->flush) {
3656 spin_unlock(&space_info->lock);
3657 /*
3658 * If we have a trans handle we can't wait because the flusher
3659 * may have to commit the transaction, which would mean we would
3660 * deadlock since we are waiting for the flusher to finish, but
3661 * hold the current transaction open.
3662 */
3663 if (current->journal_info)
3664 return -EAGAIN;
3665 ret = wait_event_interruptible(space_info->wait,
3666 !space_info->flush);
3667 /* Must have been interrupted, return */
3668 if (ret)
3669 return -EINTR;
3670
3671 spin_lock(&space_info->lock);
3672 }
3673
3674 ret = -ENOSPC;
3675 used = space_info->bytes_used + space_info->bytes_reserved +
3676 space_info->bytes_pinned + space_info->bytes_readonly +
3677 space_info->bytes_may_use;
3678
3679 /*
3680 * The idea here is that if we've not already over-reserved the block group
3681 * then we can go ahead and save our reservation first and then start
3682 * flushing if we need to. Otherwise, if we've already overcommitted,
3683 * let's start flushing stuff first and then come back and try to make
3684 * our reservation.
3685 */
3686 if (used <= space_info->total_bytes) {
3687 if (used + orig_bytes <= space_info->total_bytes) {
3688 space_info->bytes_may_use += orig_bytes;
3689 ret = 0;
3690 } else {
3691 /*
3692 * Ok set num_bytes to orig_bytes since we aren't
3693 * overcommitted, this way we only try and reclaim what
3694 * we need.
3695 */
3696 num_bytes = orig_bytes;
3697 }
3698 } else {
3699 /*
3700 * Ok we're overcommitted, set num_bytes to the overcommitted
3701 * amount plus the amount of bytes that we need for this
3702 * reservation.
3703 */
3704 wait_ordered = true;
3705 num_bytes = used - space_info->total_bytes +
3706 (orig_bytes * (retries + 1));
3707 }
3708
3709 if (ret) {
3710 u64 profile = btrfs_get_alloc_profile(root, 0);
3711 u64 avail;
3712
3713 /*
3714 * If we have a lot of space that's pinned, don't bother doing
3715 * the overcommit dance yet and just commit the transaction.
3716 */
3717 avail = (space_info->total_bytes - space_info->bytes_used) * 8;
3718 do_div(avail, 10);
3719 if (space_info->bytes_pinned >= avail && flush && !committed) {
3720 space_info->flush = 1;
3721 flushing = true;
3722 spin_unlock(&space_info->lock);
3723 ret = may_commit_transaction(root, space_info,
3724 orig_bytes, 1);
3725 if (ret)
3726 goto out;
3727 committed = true;
3728 goto again;
3729 }
3730
3731 spin_lock(&root->fs_info->free_chunk_lock);
3732 avail = root->fs_info->free_chunk_space;
3733
3734 /*
3735 * If we have dup, raid1 or raid10 then only half of the free
3736 * space is actually usable.
3737 */
3738 if (profile & (BTRFS_BLOCK_GROUP_DUP |
3739 BTRFS_BLOCK_GROUP_RAID1 |
3740 BTRFS_BLOCK_GROUP_RAID10))
3741 avail >>= 1;
3742
3743 /*
3744 * If we aren't flushing don't let us overcommit too much, say
3745 * 1/8th of the space. If we can flush, let it overcommit up to
3746 * 1/2 of the space.
3747 */
3748 if (flush)
3749 avail >>= 3;
3750 else
3751 avail >>= 1;
3752 spin_unlock(&root->fs_info->free_chunk_lock);
3753
3754 if (used + num_bytes < space_info->total_bytes + avail) {
3755 space_info->bytes_may_use += orig_bytes;
3756 ret = 0;
3757 } else {
3758 wait_ordered = true;
3759 }
3760 }
3761
3762 /*
3763 * Couldn't make our reservation, save our place so while we're trying
3764 * to reclaim space we can actually use it instead of somebody else
3765 * stealing it from us.
3766 */
3767 if (ret && flush) {
3768 flushing = true;
3769 space_info->flush = 1;
3770 }
3771
3772 spin_unlock(&space_info->lock);
3773
3774 if (!ret || !flush)
3775 goto out;
3776
3777 /*
3778 * We do synchronous shrinking since we don't actually unreserve
3779 * metadata until after the IO is completed.
3780 */
3781 ret = shrink_delalloc(root, num_bytes, wait_ordered);
3782 if (ret < 0)
3783 goto out;
3784
3785 ret = 0;
3786
3787 /*
3788 * So if we were overcommitted it's possible that somebody else flushed
3789 * out enough space and we simply didn't have enough space to reclaim,
3790 * so go back around and try again.
3791 */
3792 if (retries < 2) {
3793 wait_ordered = true;
3794 retries++;
3795 goto again;
3796 }
3797
3798 ret = -ENOSPC;
3799 if (committed)
3800 goto out;
3801
3802 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
3803 if (!ret) {
3804 committed = true;
3805 goto again;
3806 }
3807
3808 out:
3809 if (flushing) {
3810 spin_lock(&space_info->lock);
3811 space_info->flush = 0;
3812 wake_up_all(&space_info->wait);
3813 spin_unlock(&space_info->lock);
3814 }
3815 return ret;
3816 }
3817
3818 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3819 struct btrfs_root *root)
3820 {
3821 struct btrfs_block_rsv *block_rsv = NULL;
3822
3823 if (root->ref_cows || root == root->fs_info->csum_root)
3824 block_rsv = trans->block_rsv;
3825
3826 if (!block_rsv)
3827 block_rsv = root->block_rsv;
3828
3829 if (!block_rsv)
3830 block_rsv = &root->fs_info->empty_block_rsv;
3831
3832 return block_rsv;
3833 }
3834
3835 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3836 u64 num_bytes)
3837 {
3838 int ret = -ENOSPC;
3839 spin_lock(&block_rsv->lock);
3840 if (block_rsv->reserved >= num_bytes) {
3841 block_rsv->reserved -= num_bytes;
3842 if (block_rsv->reserved < block_rsv->size)
3843 block_rsv->full = 0;
3844 ret = 0;
3845 }
3846 spin_unlock(&block_rsv->lock);
3847 return ret;
3848 }
3849
3850 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3851 u64 num_bytes, int update_size)
3852 {
3853 spin_lock(&block_rsv->lock);
3854 block_rsv->reserved += num_bytes;
3855 if (update_size)
3856 block_rsv->size += num_bytes;
3857 else if (block_rsv->reserved >= block_rsv->size)
3858 block_rsv->full = 1;
3859 spin_unlock(&block_rsv->lock);
3860 }
3861
3862 static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3863 struct btrfs_block_rsv *dest, u64 num_bytes)
3864 {
3865 struct btrfs_space_info *space_info = block_rsv->space_info;
3866
3867 spin_lock(&block_rsv->lock);
3868 if (num_bytes == (u64)-1)
3869 num_bytes = block_rsv->size;
3870 block_rsv->size -= num_bytes;
3871 if (block_rsv->reserved >= block_rsv->size) {
3872 num_bytes = block_rsv->reserved - block_rsv->size;
3873 block_rsv->reserved = block_rsv->size;
3874 block_rsv->full = 1;
3875 } else {
3876 num_bytes = 0;
3877 }
3878 spin_unlock(&block_rsv->lock);
3879
3880 if (num_bytes > 0) {
3881 if (dest) {
3882 spin_lock(&dest->lock);
3883 if (!dest->full) {
3884 u64 bytes_to_add;
3885
3886 bytes_to_add = dest->size - dest->reserved;
3887 bytes_to_add = min(num_bytes, bytes_to_add);
3888 dest->reserved += bytes_to_add;
3889 if (dest->reserved >= dest->size)
3890 dest->full = 1;
3891 num_bytes -= bytes_to_add;
3892 }
3893 spin_unlock(&dest->lock);
3894 }
3895 if (num_bytes) {
3896 spin_lock(&space_info->lock);
3897 space_info->bytes_may_use -= num_bytes;
3898 space_info->reservation_progress++;
3899 spin_unlock(&space_info->lock);
3900 }
3901 }
3902 }
3903
3904 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3905 struct btrfs_block_rsv *dst, u64 num_bytes)
3906 {
3907 int ret;
3908
3909 ret = block_rsv_use_bytes(src, num_bytes);
3910 if (ret)
3911 return ret;
3912
3913 block_rsv_add_bytes(dst, num_bytes, 1);
3914 return 0;
3915 }
3916
3917 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3918 {
3919 memset(rsv, 0, sizeof(*rsv));
3920 spin_lock_init(&rsv->lock);
3921 }
3922
3923 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3924 {
3925 struct btrfs_block_rsv *block_rsv;
3926 struct btrfs_fs_info *fs_info = root->fs_info;
3927
3928 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3929 if (!block_rsv)
3930 return NULL;
3931
3932 btrfs_init_block_rsv(block_rsv);
3933 block_rsv->space_info = __find_space_info(fs_info,
3934 BTRFS_BLOCK_GROUP_METADATA);
3935 return block_rsv;
3936 }
3937
3938 void btrfs_free_block_rsv(struct btrfs_root *root,
3939 struct btrfs_block_rsv *rsv)
3940 {
3941 btrfs_block_rsv_release(root, rsv, (u64)-1);
3942 kfree(rsv);
3943 }
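/*
 * Editor's sketch of the typical lifecycle for the helpers above (not
 * part of the original source; 'root' and 'num_bytes' come from the
 * caller):
 *
 *	struct btrfs_block_rsv *rsv = btrfs_alloc_block_rsv(root);
 *	if (!rsv)
 *		return -ENOMEM;
 *	int ret = btrfs_block_rsv_add(root, rsv, num_bytes);
 *	if (ret)
 *		btrfs_free_block_rsv(root, rsv);
 *
 * btrfs_free_block_rsv() releases any bytes still held before freeing.
 */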
3944
3945 static inline int __block_rsv_add(struct btrfs_root *root,
3946 struct btrfs_block_rsv *block_rsv,
3947 u64 num_bytes, int flush)
3948 {
3949 int ret;
3950
3951 if (num_bytes == 0)
3952 return 0;
3953
3954 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
3955 if (!ret) {
3956 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3957 return 0;
3958 }
3959
3960 return ret;
3961 }
3962
3963 int btrfs_block_rsv_add(struct btrfs_root *root,
3964 struct btrfs_block_rsv *block_rsv,
3965 u64 num_bytes)
3966 {
3967 return __block_rsv_add(root, block_rsv, num_bytes, 1);
3968 }
3969
3970 int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
3971 struct btrfs_block_rsv *block_rsv,
3972 u64 num_bytes)
3973 {
3974 return __block_rsv_add(root, block_rsv, num_bytes, 0);
3975 }
3976
3977 int btrfs_block_rsv_check(struct btrfs_root *root,
3978 struct btrfs_block_rsv *block_rsv, int min_factor)
3979 {
3980 u64 num_bytes = 0;
3981 int ret = -ENOSPC;
3982
3983 if (!block_rsv)
3984 return 0;
3985
3986 spin_lock(&block_rsv->lock);
3987 num_bytes = div_factor(block_rsv->size, min_factor);
3988 if (block_rsv->reserved >= num_bytes)
3989 ret = 0;
3990 spin_unlock(&block_rsv->lock);
3991
3992 return ret;
3993 }
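/*
 * Editor's note: div_factor() (a btrfs helper defined elsewhere) scales
 * by min_factor/10, so a call such as btrfs_block_rsv_check(root, rsv, 5)
 * returns 0 only while at least 50% of rsv->size is still reserved
 * (e.g. 50MiB of a 100MiB rsv).
 */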
3994
3995 static inline int __btrfs_block_rsv_refill(struct btrfs_root *root,
3996 struct btrfs_block_rsv *block_rsv,
3997 u64 min_reserved, int flush)
3998 {
3999 u64 num_bytes = 0;
4000 int ret = -ENOSPC;
4001
4002 if (!block_rsv)
4003 return 0;
4004
4005 spin_lock(&block_rsv->lock);
4006 num_bytes = min_reserved;
4007 if (block_rsv->reserved >= num_bytes)
4008 ret = 0;
4009 else
4010 num_bytes -= block_rsv->reserved;
4011 spin_unlock(&block_rsv->lock);
4012
4013 if (!ret)
4014 return 0;
4015
4016 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
4017 if (!ret) {
4018 block_rsv_add_bytes(block_rsv, num_bytes, 0);
4019 return 0;
4020 }
4021
4022 return ret;
4023 }
4024
4025 int btrfs_block_rsv_refill(struct btrfs_root *root,
4026 struct btrfs_block_rsv *block_rsv,
4027 u64 min_reserved)
4028 {
4029 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 1);
4030 }
4031
4032 int btrfs_block_rsv_refill_noflush(struct btrfs_root *root,
4033 struct btrfs_block_rsv *block_rsv,
4034 u64 min_reserved)
4035 {
4036 return __btrfs_block_rsv_refill(root, block_rsv, min_reserved, 0);
4037 }
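/*
 * Editor's note: the refill helpers above only top up the shortfall --
 * e.g. if min_reserved is 8MiB and 5MiB is already reserved, only 3MiB
 * is requested from reserve_metadata_bytes().
 */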
4038
4039 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
4040 struct btrfs_block_rsv *dst_rsv,
4041 u64 num_bytes)
4042 {
4043 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4044 }
4045
4046 void btrfs_block_rsv_release(struct btrfs_root *root,
4047 struct btrfs_block_rsv *block_rsv,
4048 u64 num_bytes)
4049 {
4050 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4051 if (global_rsv->full || global_rsv == block_rsv ||
4052 block_rsv->space_info != global_rsv->space_info)
4053 global_rsv = NULL;
4054 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
4055 }
4056
4057 /*
4058 * helper to calculate size of global block reservation.
4059 * the desired value is sum of space used by extent tree,
4060 * checksum tree and root tree
4061 */
4062 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4063 {
4064 struct btrfs_space_info *sinfo;
4065 u64 num_bytes;
4066 u64 meta_used;
4067 u64 data_used;
4068 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
4069
4070 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
4071 spin_lock(&sinfo->lock);
4072 data_used = sinfo->bytes_used;
4073 spin_unlock(&sinfo->lock);
4074
4075 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4076 spin_lock(&sinfo->lock);
4077 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
4078 data_used = 0;
4079 meta_used = sinfo->bytes_used;
4080 spin_unlock(&sinfo->lock);
4081
4082 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
4083 csum_size * 2;
4084 num_bytes += div64_u64(data_used + meta_used, 50);
4085
4086 if (num_bytes * 3 > meta_used)
4087 num_bytes = div64_u64(meta_used, 3);
4088
4089 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4090 }
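/*
 * Editor's note -- a rough worked example of the sizing above, with
 * made-up numbers: data_used = 100GiB, meta_used = 2GiB, 4KiB blocks and
 * 4-byte csums give ~200MiB for two copies of the csums plus ~2GiB for
 * the (data + meta) / 50 term; 3x that total exceeds meta_used, so the
 * result is capped at meta_used / 3 (~682MiB) and then aligned up.
 */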
4091
4092 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
4093 {
4094 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
4095 struct btrfs_space_info *sinfo = block_rsv->space_info;
4096 u64 num_bytes;
4097
4098 num_bytes = calc_global_metadata_size(fs_info);
4099
4100 spin_lock(&block_rsv->lock);
4101 spin_lock(&sinfo->lock);
4102
4103 block_rsv->size = num_bytes;
4104
4105 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
4106 sinfo->bytes_reserved + sinfo->bytes_readonly +
4107 sinfo->bytes_may_use;
4108
4109 if (sinfo->total_bytes > num_bytes) {
4110 num_bytes = sinfo->total_bytes - num_bytes;
4111 block_rsv->reserved += num_bytes;
4112 sinfo->bytes_may_use += num_bytes;
4113 }
4114
4115 if (block_rsv->reserved >= block_rsv->size) {
4116 num_bytes = block_rsv->reserved - block_rsv->size;
4117 sinfo->bytes_may_use -= num_bytes;
4118 sinfo->reservation_progress++;
4119 block_rsv->reserved = block_rsv->size;
4120 block_rsv->full = 1;
4121 }
4122
4123 spin_unlock(&sinfo->lock);
4124 spin_unlock(&block_rsv->lock);
4125 }
4126
4127 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4128 {
4129 struct btrfs_space_info *space_info;
4130
4131 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4132 fs_info->chunk_block_rsv.space_info = space_info;
4133
4134 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4135 fs_info->global_block_rsv.space_info = space_info;
4136 fs_info->delalloc_block_rsv.space_info = space_info;
4137 fs_info->trans_block_rsv.space_info = space_info;
4138 fs_info->empty_block_rsv.space_info = space_info;
4139 fs_info->delayed_block_rsv.space_info = space_info;
4140
4141 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
4142 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4143 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4144 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4145 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4146
4147 update_global_block_rsv(fs_info);
4148 }
4149
4150 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
4151 {
4152 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
4153 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
4154 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
4155 WARN_ON(fs_info->trans_block_rsv.size > 0);
4156 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
4157 WARN_ON(fs_info->chunk_block_rsv.size > 0);
4158 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
4159 WARN_ON(fs_info->delayed_block_rsv.size > 0);
4160 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
4161 }
4162
4163 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
4164 struct btrfs_root *root)
4165 {
4166 if (!trans->bytes_reserved)
4167 return;
4168
4169 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
4170 trans->bytes_reserved = 0;
4171 }
4172
4173 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
4174 struct inode *inode)
4175 {
4176 struct btrfs_root *root = BTRFS_I(inode)->root;
4177 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4178 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
4179
4180 /*
4181 * We need to hold space in order to delete our orphan item once we've
4182 * added it, so this takes the reservation, which we can then release
4183 * when we are truly done with the orphan item.
4184 */
4185 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4186 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4187 }
4188
4189 void btrfs_orphan_release_metadata(struct inode *inode)
4190 {
4191 struct btrfs_root *root = BTRFS_I(inode)->root;
4192 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
4193 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
4194 }
4195
4196 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
4197 struct btrfs_pending_snapshot *pending)
4198 {
4199 struct btrfs_root *root = pending->root;
4200 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
4201 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
4202 /*
4203 * two for root back/forward refs, two for directory entries
4204 * and one for root of the snapshot.
4205 */
4206 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
4207 dst_rsv->space_info = src_rsv->space_info;
4208 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
4209 }
4210
4211 /**
4212 * drop_outstanding_extent - drop an outstanding extent
4213 * @inode: the inode we're dropping the extent for
4214 *
4215 * This is called when we are freeing up an outstanding extent, either called
4216 * after an error or after an extent is written. This will return the number of
4217 * reserved extents that need to be freed. This must be called with
4218 * BTRFS_I(inode)->lock held.
4219 */
4220 static unsigned drop_outstanding_extent(struct inode *inode)
4221 {
4222 unsigned drop_inode_space = 0;
4223 unsigned dropped_extents = 0;
4224
4225 BUG_ON(!BTRFS_I(inode)->outstanding_extents);
4226 BTRFS_I(inode)->outstanding_extents--;
4227
4228 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4229 BTRFS_I(inode)->delalloc_meta_reserved) {
4230 drop_inode_space = 1;
4231 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4232 }
4233
4234 /*
4235 * If we have at least as many outstanding extents as reserved
4236 * extents, leave the reserved extents count alone.
4237 */
4238 if (BTRFS_I(inode)->outstanding_extents >=
4239 BTRFS_I(inode)->reserved_extents)
4240 return drop_inode_space;
4241
4242 dropped_extents = BTRFS_I(inode)->reserved_extents -
4243 BTRFS_I(inode)->outstanding_extents;
4244 BTRFS_I(inode)->reserved_extents -= dropped_extents;
4245 return dropped_extents + drop_inode_space;
4246 }
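/*
 * Editor's note -- example of the accounting above: if an inode had
 * reserved_extents = 3 and outstanding_extents = 2 before the call,
 * outstanding drops to 1, reserved drops to 1, and 2 is returned (plus
 * one more if this also released the delalloc inode reservation).
 */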
4247
4248 /**
4249 * calc_csum_metadata_size - return the amount of metadata space that must be
4250 * reserved/freed for the given bytes.
4251 * @inode: the inode we're manipulating
4252 * @num_bytes: the number of bytes in question
4253 * @reserve: 1 if we are reserving space, 0 if we are freeing space
4254 *
4255 * This adjusts the number of csum_bytes in the inode and then returns the
4256 * correct amount of metadata that must either be reserved or freed. We
4257 * calculate how many checksums we can fit into one leaf and then divide the
4258 * number of bytes that will need to be checksummed by this value to figure out
4259 * how many checksums will be required. If we are adding bytes then the number
4260 * may go up and we will return the number of additional bytes that must be
4261 * reserved. If it is going down we will return the number of bytes that must
4262 * be freed.
4263 *
4264 * This must be called with BTRFS_I(inode)->lock held.
4265 */
4266 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
4267 int reserve)
4268 {
4269 struct btrfs_root *root = BTRFS_I(inode)->root;
4270 u64 csum_size;
4271 int num_csums_per_leaf;
4272 int num_csums;
4273 int old_csums;
4274
4275 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
4276 BTRFS_I(inode)->csum_bytes == 0)
4277 return 0;
4278
4279 old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4280 if (reserve)
4281 BTRFS_I(inode)->csum_bytes += num_bytes;
4282 else
4283 BTRFS_I(inode)->csum_bytes -= num_bytes;
4284 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
4285 num_csums_per_leaf = (int)div64_u64(csum_size,
4286 sizeof(struct btrfs_csum_item) +
4287 sizeof(struct btrfs_disk_key));
4288 num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
4289 num_csums = num_csums + num_csums_per_leaf - 1;
4290 num_csums = num_csums / num_csums_per_leaf;
4291
4292 old_csums = old_csums + num_csums_per_leaf - 1;
4293 old_csums = old_csums / num_csums_per_leaf;
4294
4295 /* No change, no need to reserve more */
4296 if (old_csums == num_csums)
4297 return 0;
4298
4299 if (reserve)
4300 return btrfs_calc_trans_metadata_size(root,
4301 num_csums - old_csums);
4302
4303 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
4304 }
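/*
 * Editor's note -- illustrative numbers for the calculation above:
 * suppose a leaf holds 100 csum items (hypothetical figure). Growing
 * csum_bytes from 50 sectors' worth to 150 sectors' worth moves us from
 * 1 leaf to 2, so we reserve btrfs_calc_trans_metadata_size(root, 1)
 * more; shrinking back frees the same amount.
 */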
4305
4306 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4307 {
4308 struct btrfs_root *root = BTRFS_I(inode)->root;
4309 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
4310 u64 to_reserve = 0;
4311 u64 csum_bytes;
4312 unsigned nr_extents = 0;
4313 int extra_reserve = 0;
4314 int flush = 1;
4315 int ret;
4316
4317 /* Need to be holding the i_mutex here unless this is the free space cache inode */
4318 if (btrfs_is_free_space_inode(root, inode))
4319 flush = 0;
4320 else
4321 WARN_ON(!mutex_is_locked(&inode->i_mutex));
4322
4323 if (flush && btrfs_transaction_in_commit(root->fs_info))
4324 schedule_timeout(1);
4325
4326 num_bytes = ALIGN(num_bytes, root->sectorsize);
4327
4328 spin_lock(&BTRFS_I(inode)->lock);
4329 BTRFS_I(inode)->outstanding_extents++;
4330
4331 if (BTRFS_I(inode)->outstanding_extents >
4332 BTRFS_I(inode)->reserved_extents)
4333 nr_extents = BTRFS_I(inode)->outstanding_extents -
4334 BTRFS_I(inode)->reserved_extents;
4335
4336 /*
4337 * Add an item to reserve for updating the inode when we complete the
4338 * delalloc io.
4339 */
4340 if (!BTRFS_I(inode)->delalloc_meta_reserved) {
4341 nr_extents++;
4342 extra_reserve = 1;
4343 }
4344
4345 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
4346 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
4347 csum_bytes = BTRFS_I(inode)->csum_bytes;
4348 spin_unlock(&BTRFS_I(inode)->lock);
4349
4350 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
4351 if (ret) {
4352 u64 to_free = 0;
4353 unsigned dropped;
4354
4355 spin_lock(&BTRFS_I(inode)->lock);
4356 dropped = drop_outstanding_extent(inode);
4357 /*
4358 * If the inode's csum_bytes is the same as the original
4359 * csum_bytes then we know we haven't raced with any free()ers,
4360 * so we can just reduce our inode's csum bytes and carry on.
4361 * Otherwise we have to do the normal free thing to account for
4362 * the case that the free side didn't free up its reserve
4363 * because of this outstanding reservation.
4364 */
4365 if (BTRFS_I(inode)->csum_bytes == csum_bytes)
4366 calc_csum_metadata_size(inode, num_bytes, 0);
4367 else
4368 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4369 spin_unlock(&BTRFS_I(inode)->lock);
4370 if (dropped)
4371 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4372
4373 if (to_free)
4374 btrfs_block_rsv_release(root, block_rsv, to_free);
4375 return ret;
4376 }
4377
4378 spin_lock(&BTRFS_I(inode)->lock);
4379 if (extra_reserve) {
4380 BTRFS_I(inode)->delalloc_meta_reserved = 1;
4381 nr_extents--;
4382 }
4383 BTRFS_I(inode)->reserved_extents += nr_extents;
4384 spin_unlock(&BTRFS_I(inode)->lock);
4385
4386 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4387
4388 return 0;
4389 }
4390
4391 /**
4392 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
4393 * @inode: the inode to release the reservation for
4394 * @num_bytes: the number of bytes we're releasing
4395 *
4396 * This will release the metadata reservation for an inode. This can be called
4397 * once we complete IO for a given set of bytes to release their metadata
4398 * reservations.
4399 */
4400 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4401 {
4402 struct btrfs_root *root = BTRFS_I(inode)->root;
4403 u64 to_free = 0;
4404 unsigned dropped;
4405
4406 num_bytes = ALIGN(num_bytes, root->sectorsize);
4407 spin_lock(&BTRFS_I(inode)->lock);
4408 dropped = drop_outstanding_extent(inode);
4409
4410 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
4411 spin_unlock(&BTRFS_I(inode)->lock);
4412 if (dropped > 0)
4413 to_free += btrfs_calc_trans_metadata_size(root, dropped);
4414
4415 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4416 to_free);
4417 }
4418
4419 /**
4420 * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
4421 * @inode: inode we're writing to
4422 * @num_bytes: the number of bytes we want to allocate
4423 *
4424 * This will do the following things
4425 *
4426 * o reserve space in the data space info for num_bytes
4427 * o reserve space in the metadata space info based on number of outstanding
4428 * extents and how much csums will be needed
4429 * o add to the inode's ->delalloc_bytes
4430 * o add it to the fs_info's delalloc inodes list.
4431 *
4432 * This will return 0 for success and -ENOSPC if there is no space left.
4433 */
4434 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4435 {
4436 int ret;
4437
4438 ret = btrfs_check_data_free_space(inode, num_bytes);
4439 if (ret)
4440 return ret;
4441
4442 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4443 if (ret) {
4444 btrfs_free_reserved_data_space(inode, num_bytes);
4445 return ret;
4446 }
4447
4448 return 0;
4449 }
4450
4451 /**
4452 * btrfs_delalloc_release_space - release data and metadata space for delalloc
4453 * @inode: inode we're releasing space for
4454 * @num_bytes: the number of bytes we want to free up
4455 *
4456 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
4457 * called in the case that we don't need the metadata AND data reservations
4458 * anymore, e.g. if there is an error or we insert an inline extent.
4459 *
4460 * This function will release the metadata space that was not used and will
4461 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
4462 * list if there are no delalloc bytes left.
4463 */
4464 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4465 {
4466 btrfs_delalloc_release_metadata(inode, num_bytes);
4467 btrfs_free_reserved_data_space(inode, num_bytes);
4468 }
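/*
 * Editor's sketch of the expected pairing (do_the_write() is a
 * hypothetical stand-in for the caller's real work):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, num_bytes);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, num_bytes);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, num_bytes);
 *
 * On success the metadata half is instead released later, through
 * btrfs_delalloc_release_metadata(), once the IO completes.
 */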
4469
4470 static int update_block_group(struct btrfs_trans_handle *trans,
4471 struct btrfs_root *root,
4472 u64 bytenr, u64 num_bytes, int alloc)
4473 {
4474 struct btrfs_block_group_cache *cache = NULL;
4475 struct btrfs_fs_info *info = root->fs_info;
4476 u64 total = num_bytes;
4477 u64 old_val;
4478 u64 byte_in_group;
4479 int factor;
4480
4481 /* block accounting for super block */
4482 spin_lock(&info->delalloc_lock);
4483 old_val = btrfs_super_bytes_used(info->super_copy);
4484 if (alloc)
4485 old_val += num_bytes;
4486 else
4487 old_val -= num_bytes;
4488 btrfs_set_super_bytes_used(info->super_copy, old_val);
4489 spin_unlock(&info->delalloc_lock);
4490
4491 while (total) {
4492 cache = btrfs_lookup_block_group(info, bytenr);
4493 if (!cache)
4494 return -1;
4495 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4496 BTRFS_BLOCK_GROUP_RAID1 |
4497 BTRFS_BLOCK_GROUP_RAID10))
4498 factor = 2;
4499 else
4500 factor = 1;
4501 /*
4502 * If this block group has free space cache written out, we
4503 * need to make sure to load it if we are removing space. This
4504 * is because we need the unpinning stage to actually add the
4505 * space back to the block group, otherwise we will leak space.
4506 */
4507 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4508 cache_block_group(cache, trans, NULL, 1);
4509
4510 byte_in_group = bytenr - cache->key.objectid;
4511 WARN_ON(byte_in_group > cache->key.offset);
4512
4513 spin_lock(&cache->space_info->lock);
4514 spin_lock(&cache->lock);
4515
4516 if (btrfs_test_opt(root, SPACE_CACHE) &&
4517 cache->disk_cache_state < BTRFS_DC_CLEAR)
4518 cache->disk_cache_state = BTRFS_DC_CLEAR;
4519
4520 cache->dirty = 1;
4521 old_val = btrfs_block_group_used(&cache->item);
4522 num_bytes = min(total, cache->key.offset - byte_in_group);
4523 if (alloc) {
4524 old_val += num_bytes;
4525 btrfs_set_block_group_used(&cache->item, old_val);
4526 cache->reserved -= num_bytes;
4527 cache->space_info->bytes_reserved -= num_bytes;
4528 cache->space_info->bytes_used += num_bytes;
4529 cache->space_info->disk_used += num_bytes * factor;
4530 spin_unlock(&cache->lock);
4531 spin_unlock(&cache->space_info->lock);
4532 } else {
4533 old_val -= num_bytes;
4534 btrfs_set_block_group_used(&cache->item, old_val);
4535 cache->pinned += num_bytes;
4536 cache->space_info->bytes_pinned += num_bytes;
4537 cache->space_info->bytes_used -= num_bytes;
4538 cache->space_info->disk_used -= num_bytes * factor;
4539 spin_unlock(&cache->lock);
4540 spin_unlock(&cache->space_info->lock);
4541
4542 set_extent_dirty(info->pinned_extents,
4543 bytenr, bytenr + num_bytes - 1,
4544 GFP_NOFS | __GFP_NOFAIL);
4545 }
4546 btrfs_put_block_group(cache);
4547 total -= num_bytes;
4548 bytenr += num_bytes;
4549 }
4550 return 0;
4551 }
4552
4553 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4554 {
4555 struct btrfs_block_group_cache *cache;
4556 u64 bytenr;
4557
4558 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4559 if (!cache)
4560 return 0;
4561
4562 bytenr = cache->key.objectid;
4563 btrfs_put_block_group(cache);
4564
4565 return bytenr;
4566 }
4567
4568 static int pin_down_extent(struct btrfs_root *root,
4569 struct btrfs_block_group_cache *cache,
4570 u64 bytenr, u64 num_bytes, int reserved)
4571 {
4572 spin_lock(&cache->space_info->lock);
4573 spin_lock(&cache->lock);
4574 cache->pinned += num_bytes;
4575 cache->space_info->bytes_pinned += num_bytes;
4576 if (reserved) {
4577 cache->reserved -= num_bytes;
4578 cache->space_info->bytes_reserved -= num_bytes;
4579 }
4580 spin_unlock(&cache->lock);
4581 spin_unlock(&cache->space_info->lock);
4582
4583 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4584 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4585 return 0;
4586 }
4587
4588 /*
4589 * this function must be called within a transaction
4590 */
4591 int btrfs_pin_extent(struct btrfs_root *root,
4592 u64 bytenr, u64 num_bytes, int reserved)
4593 {
4594 struct btrfs_block_group_cache *cache;
4595
4596 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4597 BUG_ON(!cache);
4598
4599 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4600
4601 btrfs_put_block_group(cache);
4602 return 0;
4603 }
4604
4605 /*
4606 * this function must be called within a transaction
4607 */
4608 int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
4609 struct btrfs_root *root,
4610 u64 bytenr, u64 num_bytes)
4611 {
4612 struct btrfs_block_group_cache *cache;
4613
4614 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4615 BUG_ON(!cache);
4616
4617 /*
4618 * pull in the free space cache (if any) so that our pin
4619 * removes the free space from the cache. We have load_only set
4620 * to one because the slow code to read in the free extents does check
4621 * the pinned extents.
4622 */
4623 cache_block_group(cache, trans, root, 1);
4624
4625 pin_down_extent(root, cache, bytenr, num_bytes, 0);
4626
4627 /* remove us from the free space cache (if we're there at all) */
4628 btrfs_remove_free_space(cache, bytenr, num_bytes);
4629 btrfs_put_block_group(cache);
4630 return 0;
4631 }
4632
4633 /**
4634 * btrfs_update_reserved_bytes - update the block_group and space info counters
4635 * @cache: The cache we are manipulating
4636 * @num_bytes: The number of bytes in question
4637 * @reserve: One of the reservation enums
4638 *
4639 * This is called by the allocator when it reserves space, or by somebody who is
4640 * freeing space that was never actually used on disk. For example if you
4641 * reserve some space for a new leaf in transaction A and before transaction A
4642 * commits you free that leaf, you call this with reserve set to 0 in order to
4643 * clear the reservation.
4644 *
4645 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
4646 * ENOSPC accounting. For data we handle the reservation through clearing the
4647 * delalloc bits in the io_tree. We have to do this since we could end up
4648 * allocating less disk space for the amount of data we have reserved in the
4649 * case of compression.
4650 *
4651 * If this is a reservation and the block group has become read only we cannot
4652 * make the reservation and return -EAGAIN, otherwise this function always
4653 * succeeds.
4654 */
4655 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
4656 u64 num_bytes, int reserve)
4657 {
4658 struct btrfs_space_info *space_info = cache->space_info;
4659 int ret = 0;
4660 spin_lock(&space_info->lock);
4661 spin_lock(&cache->lock);
4662 if (reserve != RESERVE_FREE) {
4663 if (cache->ro) {
4664 ret = -EAGAIN;
4665 } else {
4666 cache->reserved += num_bytes;
4667 space_info->bytes_reserved += num_bytes;
4668 if (reserve == RESERVE_ALLOC) {
4669 BUG_ON(space_info->bytes_may_use < num_bytes);
4670 space_info->bytes_may_use -= num_bytes;
4671 }
4672 }
4673 } else {
4674 if (cache->ro)
4675 space_info->bytes_readonly += num_bytes;
4676 cache->reserved -= num_bytes;
4677 space_info->bytes_reserved -= num_bytes;
4678 space_info->reservation_progress++;
4679 }
4680 spin_unlock(&cache->lock);
4681 spin_unlock(&space_info->lock);
4682 return ret;
4683 }
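/*
 * Editor's summary of the three cases above: RESERVE_ALLOC moves
 * num_bytes from bytes_may_use to bytes_reserved (proper ENOSPC
 * accounting), RESERVE_ALLOC_NO_ACCOUNT bumps bytes_reserved without
 * touching bytes_may_use, and RESERVE_FREE drops the bytes from
 * bytes_reserved again.
 */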
4684
4685 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4686 struct btrfs_root *root)
4687 {
4688 struct btrfs_fs_info *fs_info = root->fs_info;
4689 struct btrfs_caching_control *next;
4690 struct btrfs_caching_control *caching_ctl;
4691 struct btrfs_block_group_cache *cache;
4692
4693 down_write(&fs_info->extent_commit_sem);
4694
4695 list_for_each_entry_safe(caching_ctl, next,
4696 &fs_info->caching_block_groups, list) {
4697 cache = caching_ctl->block_group;
4698 if (block_group_cache_done(cache)) {
4699 cache->last_byte_to_unpin = (u64)-1;
4700 list_del_init(&caching_ctl->list);
4701 put_caching_control(caching_ctl);
4702 } else {
4703 cache->last_byte_to_unpin = caching_ctl->progress;
4704 }
4705 }
4706
4707 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4708 fs_info->pinned_extents = &fs_info->freed_extents[1];
4709 else
4710 fs_info->pinned_extents = &fs_info->freed_extents[0];
4711
4712 up_write(&fs_info->extent_commit_sem);
4713
4714 update_global_block_rsv(fs_info);
4715 return 0;
4716 }
4717
4718 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4719 {
4720 struct btrfs_fs_info *fs_info = root->fs_info;
4721 struct btrfs_block_group_cache *cache = NULL;
4722 u64 len;
4723
4724 while (start <= end) {
4725 if (!cache ||
4726 start >= cache->key.objectid + cache->key.offset) {
4727 if (cache)
4728 btrfs_put_block_group(cache);
4729 cache = btrfs_lookup_block_group(fs_info, start);
4730 BUG_ON(!cache);
4731 }
4732
4733 len = cache->key.objectid + cache->key.offset - start;
4734 len = min(len, end + 1 - start);
4735
4736 if (start < cache->last_byte_to_unpin) {
4737 len = min(len, cache->last_byte_to_unpin - start);
4738 btrfs_add_free_space(cache, start, len);
4739 }
4740
4741 start += len;
4742
4743 spin_lock(&cache->space_info->lock);
4744 spin_lock(&cache->lock);
4745 cache->pinned -= len;
4746 cache->space_info->bytes_pinned -= len;
4747 if (cache->ro)
4748 cache->space_info->bytes_readonly += len;
4749 spin_unlock(&cache->lock);
4750 spin_unlock(&cache->space_info->lock);
4751 }
4752
4753 if (cache)
4754 btrfs_put_block_group(cache);
4755 return 0;
4756 }
4757
4758 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4759 struct btrfs_root *root)
4760 {
4761 struct btrfs_fs_info *fs_info = root->fs_info;
4762 struct extent_io_tree *unpin;
4763 u64 start;
4764 u64 end;
4765 int ret;
4766
4767 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4768 unpin = &fs_info->freed_extents[1];
4769 else
4770 unpin = &fs_info->freed_extents[0];
4771
4772 while (1) {
4773 ret = find_first_extent_bit(unpin, 0, &start, &end,
4774 EXTENT_DIRTY);
4775 if (ret)
4776 break;
4777
4778 if (btrfs_test_opt(root, DISCARD))
4779 ret = btrfs_discard_extent(root, start,
4780 end + 1 - start, NULL);
4781
4782 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4783 unpin_extent_range(root, start, end);
4784 cond_resched();
4785 }
4786
4787 return 0;
4788 }
4789
4790 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4791 struct btrfs_root *root,
4792 u64 bytenr, u64 num_bytes, u64 parent,
4793 u64 root_objectid, u64 owner_objectid,
4794 u64 owner_offset, int refs_to_drop,
4795 struct btrfs_delayed_extent_op *extent_op)
4796 {
4797 struct btrfs_key key;
4798 struct btrfs_path *path;
4799 struct btrfs_fs_info *info = root->fs_info;
4800 struct btrfs_root *extent_root = info->extent_root;
4801 struct extent_buffer *leaf;
4802 struct btrfs_extent_item *ei;
4803 struct btrfs_extent_inline_ref *iref;
4804 int ret;
4805 int is_data;
4806 int extent_slot = 0;
4807 int found_extent = 0;
4808 int num_to_del = 1;
4809 u32 item_size;
4810 u64 refs;
4811
4812 path = btrfs_alloc_path();
4813 if (!path)
4814 return -ENOMEM;
4815
4816 path->reada = 1;
4817 path->leave_spinning = 1;
4818
4819 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4820 BUG_ON(!is_data && refs_to_drop != 1);
4821
4822 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4823 bytenr, num_bytes, parent,
4824 root_objectid, owner_objectid,
4825 owner_offset);
4826 if (ret == 0) {
4827 extent_slot = path->slots[0];
4828 while (extent_slot >= 0) {
4829 btrfs_item_key_to_cpu(path->nodes[0], &key,
4830 extent_slot);
4831 if (key.objectid != bytenr)
4832 break;
4833 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4834 key.offset == num_bytes) {
4835 found_extent = 1;
4836 break;
4837 }
4838 if (path->slots[0] - extent_slot > 5)
4839 break;
4840 extent_slot--;
4841 }
4842 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4843 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4844 if (found_extent && item_size < sizeof(*ei))
4845 found_extent = 0;
4846 #endif
4847 if (!found_extent) {
4848 BUG_ON(iref);
4849 ret = remove_extent_backref(trans, extent_root, path,
4850 NULL, refs_to_drop,
4851 is_data);
4852 BUG_ON(ret);
4853 btrfs_release_path(path);
4854 path->leave_spinning = 1;
4855
4856 key.objectid = bytenr;
4857 key.type = BTRFS_EXTENT_ITEM_KEY;
4858 key.offset = num_bytes;
4859
4860 ret = btrfs_search_slot(trans, extent_root,
4861 &key, path, -1, 1);
4862 if (ret) {
4863 printk(KERN_ERR "umm, got %d back from search"
4864 ", was looking for %llu\n", ret,
4865 (unsigned long long)bytenr);
4866 if (ret > 0)
4867 btrfs_print_leaf(extent_root,
4868 path->nodes[0]);
4869 }
4870 BUG_ON(ret);
4871 extent_slot = path->slots[0];
4872 }
4873 } else {
4874 btrfs_print_leaf(extent_root, path->nodes[0]);
4875 WARN_ON(1);
4876 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4877 "parent %llu root %llu owner %llu offset %llu\n",
4878 (unsigned long long)bytenr,
4879 (unsigned long long)parent,
4880 (unsigned long long)root_objectid,
4881 (unsigned long long)owner_objectid,
4882 (unsigned long long)owner_offset);
4883 }
4884
4885 leaf = path->nodes[0];
4886 item_size = btrfs_item_size_nr(leaf, extent_slot);
4887 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4888 if (item_size < sizeof(*ei)) {
4889 BUG_ON(found_extent || extent_slot != path->slots[0]);
4890 ret = convert_extent_item_v0(trans, extent_root, path,
4891 owner_objectid, 0);
4892 BUG_ON(ret < 0);
4893
4894 btrfs_release_path(path);
4895 path->leave_spinning = 1;
4896
4897 key.objectid = bytenr;
4898 key.type = BTRFS_EXTENT_ITEM_KEY;
4899 key.offset = num_bytes;
4900
4901 ret = btrfs_search_slot(trans, extent_root, &key, path,
4902 -1, 1);
4903 if (ret) {
4904 printk(KERN_ERR "umm, got %d back from search"
4905 ", was looking for %llu\n", ret,
4906 (unsigned long long)bytenr);
4907 btrfs_print_leaf(extent_root, path->nodes[0]);
4908 }
4909 BUG_ON(ret);
4910 extent_slot = path->slots[0];
4911 leaf = path->nodes[0];
4912 item_size = btrfs_item_size_nr(leaf, extent_slot);
4913 }
4914 #endif
4915 BUG_ON(item_size < sizeof(*ei));
4916 ei = btrfs_item_ptr(leaf, extent_slot,
4917 struct btrfs_extent_item);
4918 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4919 struct btrfs_tree_block_info *bi;
4920 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4921 bi = (struct btrfs_tree_block_info *)(ei + 1);
4922 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4923 }
4924
4925 refs = btrfs_extent_refs(leaf, ei);
4926 BUG_ON(refs < refs_to_drop);
4927 refs -= refs_to_drop;
4928
4929 if (refs > 0) {
4930 if (extent_op)
4931 __run_delayed_extent_op(extent_op, leaf, ei);
4932 /*
4933 * In the case of an inline back ref, the reference count will
4934 * be updated by remove_extent_backref
4935 */
4936 if (iref) {
4937 BUG_ON(!found_extent);
4938 } else {
4939 btrfs_set_extent_refs(leaf, ei, refs);
4940 btrfs_mark_buffer_dirty(leaf);
4941 }
4942 if (found_extent) {
4943 ret = remove_extent_backref(trans, extent_root, path,
4944 iref, refs_to_drop,
4945 is_data);
4946 BUG_ON(ret);
4947 }
4948 } else {
4949 if (found_extent) {
4950 BUG_ON(is_data && refs_to_drop !=
4951 extent_data_ref_count(root, path, iref));
4952 if (iref) {
4953 BUG_ON(path->slots[0] != extent_slot);
4954 } else {
4955 BUG_ON(path->slots[0] != extent_slot + 1);
4956 path->slots[0] = extent_slot;
4957 num_to_del = 2;
4958 }
4959 }
4960
4961 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4962 num_to_del);
4963 BUG_ON(ret);
4964 btrfs_release_path(path);
4965
4966 if (is_data) {
4967 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4968 BUG_ON(ret);
4969 } else {
4970 invalidate_mapping_pages(info->btree_inode->i_mapping,
4971 bytenr >> PAGE_CACHE_SHIFT,
4972 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4973 }
4974
4975 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4976 BUG_ON(ret);
4977 }
4978 btrfs_free_path(path);
4979 return ret;
4980 }
4981
4982 /*
4983 * when we free a block, it is possible (and likely) that we free the last
4984 * delayed ref for that extent as well. This searches the delayed ref tree for
4985 * a given extent, and if there are no other delayed refs to be processed, it
4986 * removes it from the tree.
4987 */
4988 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4989 struct btrfs_root *root, u64 bytenr)
4990 {
4991 struct btrfs_delayed_ref_head *head;
4992 struct btrfs_delayed_ref_root *delayed_refs;
4993 struct btrfs_delayed_ref_node *ref;
4994 struct rb_node *node;
4995 int ret = 0;
4996
4997 delayed_refs = &trans->transaction->delayed_refs;
4998 spin_lock(&delayed_refs->lock);
4999 head = btrfs_find_delayed_ref_head(trans, bytenr);
5000 if (!head)
5001 goto out;
5002
5003 node = rb_prev(&head->node.rb_node);
5004 if (!node)
5005 goto out;
5006
5007 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
5008
5009 /* there are still entries for this ref, we can't drop it */
5010 if (ref->bytenr == bytenr)
5011 goto out;
5012
5013 if (head->extent_op) {
5014 if (!head->must_insert_reserved)
5015 goto out;
5016 kfree(head->extent_op);
5017 head->extent_op = NULL;
5018 }
5019
5020 /*
5021 * waiting for the lock here would deadlock. If someone else has it
5022 * locked, they are already in the process of dropping it anyway.
5023 */
5024 if (!mutex_trylock(&head->mutex))
5025 goto out;
5026
5027 /*
5028 * at this point we have a head with no other entries. Go
5029 * ahead and process it.
5030 */
5031 head->node.in_tree = 0;
5032 rb_erase(&head->node.rb_node, &delayed_refs->root);
5033
5034 delayed_refs->num_entries--;
5035 if (waitqueue_active(&delayed_refs->seq_wait))
5036 wake_up(&delayed_refs->seq_wait);
5037
5038 /*
5039 * we don't take a ref on the node because we're removing it from the
5040 * tree, so we just steal the ref the tree was holding.
5041 */
5042 delayed_refs->num_heads--;
5043 if (list_empty(&head->cluster))
5044 delayed_refs->num_heads_ready--;
5045
5046 list_del_init(&head->cluster);
5047 spin_unlock(&delayed_refs->lock);
5048
5049 BUG_ON(head->extent_op);
5050 if (head->must_insert_reserved)
5051 ret = 1;
5052
5053 mutex_unlock(&head->mutex);
5054 btrfs_put_delayed_ref(&head->node);
5055 return ret;
5056 out:
5057 spin_unlock(&delayed_refs->lock);
5058 return 0;
5059 }
5060
5061 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5062 struct btrfs_root *root,
5063 struct extent_buffer *buf,
5064 u64 parent, int last_ref, int for_cow)
5065 {
5066 struct btrfs_block_group_cache *cache = NULL;
5067 int ret;
5068
5069 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5070 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
5071 buf->start, buf->len,
5072 parent, root->root_key.objectid,
5073 btrfs_header_level(buf),
5074 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5075 BUG_ON(ret);
5076 }
5077
5078 if (!last_ref)
5079 return;
5080
5081 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
5082
5083 if (btrfs_header_generation(buf) == trans->transid) {
5084 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
5085 ret = check_ref_cleanup(trans, root, buf->start);
5086 if (!ret)
5087 goto out;
5088 }
5089
5090 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
5091 pin_down_extent(root, cache, buf->start, buf->len, 1);
5092 goto out;
5093 }
5094
5095 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
5096
5097 btrfs_add_free_space(cache, buf->start, buf->len);
5098 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
5099 }
5100 out:
5101 /*
5102 * Deleting the buffer, clear the corrupt flag since it doesn't matter
5103 * anymore.
5104 */
5105 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
5106 btrfs_put_block_group(cache);
5107 }
5108
5109 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
5110 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
5111 u64 owner, u64 offset, int for_cow)
5112 {
5113 int ret;
5114 struct btrfs_fs_info *fs_info = root->fs_info;
5115
5116 /*
5117 * tree log blocks never actually go into the extent allocation
5118 * tree, just update pinning info and exit early.
5119 */
5120 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
5121 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
5122 /* unlocks the pinned mutex */
5123 btrfs_pin_extent(root, bytenr, num_bytes, 1);
5124 ret = 0;
5125 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
5126 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
5127 num_bytes,
5128 parent, root_objectid, (int)owner,
5129 BTRFS_DROP_DELAYED_REF, NULL, for_cow);
5130 BUG_ON(ret);
5131 } else {
5132 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
5133 num_bytes,
5134 parent, root_objectid, owner,
5135 offset, BTRFS_DROP_DELAYED_REF,
5136 NULL, for_cow);
5137 BUG_ON(ret);
5138 }
5139 return ret;
5140 }
5141
5142 static u64 stripe_align(struct btrfs_root *root, u64 val)
5143 {
5144 u64 mask = ((u64)root->stripesize - 1);
5145 u64 ret = (val + mask) & ~mask;
5146 return ret;
5147 }
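/*
 * Editor's note: stripe_align() rounds up to the next stripesize
 * boundary -- e.g. with a 64KiB stripesize, 70000 becomes 131072.
 */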
5148
5149 /*
5150 * when we wait for progress in the block group caching, it's because
5151 * our allocation attempt failed at least once. So, we must sleep
5152 * and let some progress happen before we try again.
5153 *
5154 * This function will sleep at least once waiting for new free space to
5155 * show up, and then it will check the block group free space numbers
5156 * for our min num_bytes. Another option is to have it go ahead
5157 * and look in the rbtree for a free extent of a given size, but this
5158 * is a good start.
5159 */
5160 static noinline int
5161 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
5162 u64 num_bytes)
5163 {
5164 struct btrfs_caching_control *caching_ctl;
5165 DEFINE_WAIT(wait);
5166
5167 caching_ctl = get_caching_control(cache);
5168 if (!caching_ctl)
5169 return 0;
5170
5171 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
5172 (cache->free_space_ctl->free_space >= num_bytes));
5173
5174 put_caching_control(caching_ctl);
5175 return 0;
5176 }
5177
5178 static noinline int
5179 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
5180 {
5181 struct btrfs_caching_control *caching_ctl;
5182 DEFINE_WAIT(wait);
5183
5184 caching_ctl = get_caching_control(cache);
5185 if (!caching_ctl)
5186 return 0;
5187
5188 wait_event(caching_ctl->wait, block_group_cache_done(cache));
5189
5190 put_caching_control(caching_ctl);
5191 return 0;
5192 }
5193
5194 static int get_block_group_index(struct btrfs_block_group_cache *cache)
5195 {
5196 int index;
5197 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
5198 index = 0;
5199 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
5200 index = 1;
5201 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
5202 index = 2;
5203 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
5204 index = 3;
5205 else
5206 index = 4;
5207 return index;
5208 }
5209
5210 enum btrfs_loop_type {
5211 LOOP_FIND_IDEAL = 0,
5212 LOOP_CACHING_NOWAIT = 1,
5213 LOOP_CACHING_WAIT = 2,
5214 LOOP_ALLOC_CHUNK = 3,
5215 LOOP_NO_EMPTY_SIZE = 4,
5216 };
5217
5218 /*
5219 * walks the btree of allocated extents and finds a hole of a given size.
5220 * The key ins is changed to record the hole:
5221 * ins->objectid == block start
5222 * ins->flags = BTRFS_EXTENT_ITEM_KEY
5223 * ins->offset == number of blocks
5224 * Any available blocks before search_start are skipped.
5225 */
5226 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
5227 struct btrfs_root *orig_root,
5228 u64 num_bytes, u64 empty_size,
5229 u64 search_start, u64 search_end,
5230 u64 hint_byte, struct btrfs_key *ins,
5231 u64 data)
5232 {
5233 int ret = 0;
5234 struct btrfs_root *root = orig_root->fs_info->extent_root;
5235 struct btrfs_free_cluster *last_ptr = NULL;
5236 struct btrfs_block_group_cache *block_group = NULL;
5237 struct btrfs_block_group_cache *used_block_group;
5238 int empty_cluster = 2 * 1024 * 1024;
5239 int allowed_chunk_alloc = 0;
5240 int done_chunk_alloc = 0;
5241 struct btrfs_space_info *space_info;
5242 int loop = 0;
5243 int index = 0;
5244 int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
5245 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
5246 bool found_uncached_bg = false;
5247 bool failed_cluster_refill = false;
5248 bool failed_alloc = false;
5249 bool use_cluster = true;
5250 bool have_caching_bg = false;
5251 u64 ideal_cache_percent = 0;
5252 u64 ideal_cache_offset = 0;
5253
5254 WARN_ON(num_bytes < root->sectorsize);
5255 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
5256 ins->objectid = 0;
5257 ins->offset = 0;
5258
5259 space_info = __find_space_info(root->fs_info, data);
5260 if (!space_info) {
5261 printk(KERN_ERR "No space info for %llu\n", data);
5262 return -ENOSPC;
5263 }
5264
5265 /*
5266 * If the space info is for both data and metadata it means we have a
5267 * small filesystem and we can't use the clustering stuff.
5268 */
5269 if (btrfs_mixed_space_info(space_info))
5270 use_cluster = false;
5271
5272 if (orig_root->ref_cows || empty_size)
5273 allowed_chunk_alloc = 1;
5274
5275 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
5276 last_ptr = &root->fs_info->meta_alloc_cluster;
5277 if (!btrfs_test_opt(root, SSD))
5278 empty_cluster = 64 * 1024;
5279 }
5280
5281 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
5282 btrfs_test_opt(root, SSD)) {
5283 last_ptr = &root->fs_info->data_alloc_cluster;
5284 }
5285
5286 if (last_ptr) {
5287 spin_lock(&last_ptr->lock);
5288 if (last_ptr->block_group)
5289 hint_byte = last_ptr->window_start;
5290 spin_unlock(&last_ptr->lock);
5291 }
5292
5293 search_start = max(search_start, first_logical_byte(root, 0));
5294 search_start = max(search_start, hint_byte);
5295
5296 if (!last_ptr)
5297 empty_cluster = 0;
5298
5299 if (search_start == hint_byte) {
5300 ideal_cache:
5301 block_group = btrfs_lookup_block_group(root->fs_info,
5302 search_start);
5303 used_block_group = block_group;
5304 /*
5305 * we don't want to use the block group if it doesn't match our
5306 * allocation bits, or if it's not cached.
5307 *
5308 * However if we are re-searching with an ideal block group
5309 * picked out then we don't care that the block group is cached.
5310 */
5311 if (block_group && block_group_bits(block_group, data) &&
5312 (block_group->cached != BTRFS_CACHE_NO ||
5313 search_start == ideal_cache_offset)) {
5314 down_read(&space_info->groups_sem);
5315 if (list_empty(&block_group->list) ||
5316 block_group->ro) {
5317 /*
5318 * someone is removing this block group,
5319 * we can't jump into the have_block_group
5320 * target because our list pointers are not
5321 * valid
5322 */
5323 btrfs_put_block_group(block_group);
5324 up_read(&space_info->groups_sem);
5325 } else {
5326 index = get_block_group_index(block_group);
5327 goto have_block_group;
5328 }
5329 } else if (block_group) {
5330 btrfs_put_block_group(block_group);
5331 }
5332 }
5333 search:
5334 have_caching_bg = false;
5335 down_read(&space_info->groups_sem);
5336 list_for_each_entry(block_group, &space_info->block_groups[index],
5337 list) {
5338 u64 offset;
5339 int cached;
5340
5341 used_block_group = block_group;
5342 btrfs_get_block_group(block_group);
5343 search_start = block_group->key.objectid;
5344
5345 /*
5346 * this can happen if we end up cycling through all the
5347 * raid types, but we want to make sure we only allocate
5348 * for the proper type.
5349 */
5350 if (!block_group_bits(block_group, data)) {
5351 u64 extra = BTRFS_BLOCK_GROUP_DUP |
5352 BTRFS_BLOCK_GROUP_RAID1 |
5353 BTRFS_BLOCK_GROUP_RAID10;
5354
5355 /*
5356 * if they asked for extra copies and this block group
5357 * doesn't provide them, bail. This does allow us to
5358 * fill raid0 from raid1.
5359 */
5360 if ((data & extra) && !(block_group->flags & extra))
5361 goto loop;
5362 }
5363
5364 have_block_group:
5365 cached = block_group_cache_done(block_group);
5366 if (unlikely(!cached)) {
5367 u64 free_percent;
5368
5369 found_uncached_bg = true;
5370 ret = cache_block_group(block_group, trans,
5371 orig_root, 1);
5372 if (block_group->cached == BTRFS_CACHE_FINISHED)
5373 goto alloc;
5374
5375 free_percent = btrfs_block_group_used(&block_group->item);
5376 free_percent *= 100;
5377 free_percent = div64_u64(free_percent,
5378 block_group->key.offset);
5379 free_percent = 100 - free_percent;
5380 if (free_percent > ideal_cache_percent &&
5381 likely(!block_group->ro)) {
5382 ideal_cache_offset = block_group->key.objectid;
5383 ideal_cache_percent = free_percent;
5384 }
5385
5386 /*
5387 * The caching workers are limited to 2 threads, so we
5388 * can queue as much work as we care to.
5389 */
5390 if (loop > LOOP_FIND_IDEAL) {
5391 ret = cache_block_group(block_group, trans,
5392 orig_root, 0);
5393 BUG_ON(ret);
5394 }
5395
5396 /*
5397 * If loop is set for cached only, try the next block
5398 * group.
5399 */
5400 if (loop == LOOP_FIND_IDEAL)
5401 goto loop;
5402 }
5403
5404 alloc:
5405 if (unlikely(block_group->ro))
5406 goto loop;
5407
5408 /*
5409 * Ok, we want to try and use the cluster allocator, so
5410 * let's look there
5411 */
5412 if (last_ptr) {
5413 /*
5414 * the refill lock keeps out other
5415 * people trying to start a new cluster
5416 */
5417 spin_lock(&last_ptr->refill_lock);
5418 used_block_group = last_ptr->block_group;
5419 if (used_block_group != block_group &&
5420 (!used_block_group ||
5421 used_block_group->ro ||
5422 !block_group_bits(used_block_group, data))) {
5423 used_block_group = block_group;
5424 goto refill_cluster;
5425 }
5426
5427 if (used_block_group != block_group)
5428 btrfs_get_block_group(used_block_group);
5429
5430 offset = btrfs_alloc_from_cluster(used_block_group,
5431 last_ptr, num_bytes, used_block_group->key.objectid);
5432 if (offset) {
5433 /* we have a block, we're done */
5434 spin_unlock(&last_ptr->refill_lock);
5435 goto checks;
5436 }
5437
5438 WARN_ON(last_ptr->block_group != used_block_group);
5439 if (used_block_group != block_group) {
5440 btrfs_put_block_group(used_block_group);
5441 used_block_group = block_group;
5442 }
5443 refill_cluster:
5444 BUG_ON(used_block_group != block_group);
5445 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
5446 * set up a new cluster, so let's just skip it
5447 * and let the allocator find whatever block
5448 * it can find. If we reach this point, we
5449 * will have tried the cluster allocator
5450 * plenty of times and not have found
5451 * anything, so we are likely way too
5452 * fragmented for the clustering stuff to find
5453 * anything.
5454 *
5455 * However, if the cluster is taken from the
5456 * current block group, release the cluster
5457 * first, so that we stand a better chance of
5458 * succeeding in the unclustered
5459 * allocation. */
5460 if (loop >= LOOP_NO_EMPTY_SIZE &&
5461 last_ptr->block_group != block_group) {
5462 spin_unlock(&last_ptr->refill_lock);
5463 goto unclustered_alloc;
5464 }
5465
5466 /*
5467 * this cluster didn't work out, free it and
5468 * start over
5469 */
5470 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5471
5472 if (loop >= LOOP_NO_EMPTY_SIZE) {
5473 spin_unlock(&last_ptr->refill_lock);
5474 goto unclustered_alloc;
5475 }
5476
5477 /* allocate a cluster in this block group */
5478 ret = btrfs_find_space_cluster(trans, root,
5479 block_group, last_ptr,
5480 search_start, num_bytes,
5481 empty_cluster + empty_size);
5482 if (ret == 0) {
5483 /*
5484 * now pull our allocation out of this
5485 * cluster
5486 */
5487 offset = btrfs_alloc_from_cluster(block_group,
5488 last_ptr, num_bytes,
5489 search_start);
5490 if (offset) {
5491 /* we found one, proceed */
5492 spin_unlock(&last_ptr->refill_lock);
5493 goto checks;
5494 }
5495 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5496 && !failed_cluster_refill) {
5497 spin_unlock(&last_ptr->refill_lock);
5498
5499 failed_cluster_refill = true;
5500 wait_block_group_cache_progress(block_group,
5501 num_bytes + empty_cluster + empty_size);
5502 goto have_block_group;
5503 }
5504
5505 /*
5506 * at this point we either didn't find a cluster
5507 * or we weren't able to allocate a block from our
5508 * cluster. Free the cluster we've been trying
5509 * to use, and go to the next block group
5510 */
5511 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5512 spin_unlock(&last_ptr->refill_lock);
5513 goto loop;
5514 }
5515
5516 unclustered_alloc:
5517 spin_lock(&block_group->free_space_ctl->tree_lock);
5518 if (cached &&
5519 block_group->free_space_ctl->free_space <
5520 num_bytes + empty_cluster + empty_size) {
5521 spin_unlock(&block_group->free_space_ctl->tree_lock);
5522 goto loop;
5523 }
5524 spin_unlock(&block_group->free_space_ctl->tree_lock);
5525
5526 offset = btrfs_find_space_for_alloc(block_group, search_start,
5527 num_bytes, empty_size);
5528 /*
5529 * If we didn't find a chunk, and we haven't failed on this
5530 * block group before, and this block group is in the middle of
5531 * caching and we are ok with waiting, then go ahead and wait
5532 * for progress to be made, and set failed_alloc to true.
5533 *
5534 * If failed_alloc is true then we've already waited on this
5535 * block group once and should move on to the next block group.
5536 */
5537 if (!offset && !failed_alloc && !cached &&
5538 loop > LOOP_CACHING_NOWAIT) {
5539 wait_block_group_cache_progress(block_group,
5540 num_bytes + empty_size);
5541 failed_alloc = true;
5542 goto have_block_group;
5543 } else if (!offset) {
5544 if (!cached)
5545 have_caching_bg = true;
5546 goto loop;
5547 }
5548 checks:
5549 search_start = stripe_align(root, offset);
5550 /* move on to the next group */
5551 if (search_start + num_bytes >= search_end) {
5552 btrfs_add_free_space(used_block_group, offset, num_bytes);
5553 goto loop;
5554 }
5555
5556 /* move on to the next group */
5557 if (search_start + num_bytes >
5558 used_block_group->key.objectid + used_block_group->key.offset) {
5559 btrfs_add_free_space(used_block_group, offset, num_bytes);
5560 goto loop;
5561 }
5562
5563 if (offset < search_start)
5564 btrfs_add_free_space(used_block_group, offset,
5565 search_start - offset);
5566 BUG_ON(offset > search_start);
5567
5568 ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
5569 alloc_type);
5570 if (ret == -EAGAIN) {
5571 btrfs_add_free_space(used_block_group, offset, num_bytes);
5572 goto loop;
5573 }
5574
5575 /* we are all good, let's return */
5576 ins->objectid = search_start;
5577 ins->offset = num_bytes;
5578
5579 if (offset < search_start)
5580 btrfs_add_free_space(used_block_group, offset,
5581 search_start - offset);
5582 BUG_ON(offset > search_start);
5583 if (used_block_group != block_group)
5584 btrfs_put_block_group(used_block_group);
5585 btrfs_put_block_group(block_group);
5586 break;
5587 loop:
5588 failed_cluster_refill = false;
5589 failed_alloc = false;
5590 BUG_ON(index != get_block_group_index(block_group));
5591 if (used_block_group != block_group)
5592 btrfs_put_block_group(used_block_group);
5593 btrfs_put_block_group(block_group);
5594 }
5595 up_read(&space_info->groups_sem);
5596
5597 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
5598 goto search;
5599
5600 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5601 goto search;
5602
5603 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5604 * for them to make caching progress. Also
5605 * determine the best possible bg to cache
5606 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5607 * caching kthreads as we move along
5608 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5609 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5610 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5611 * again
5612 */
5613 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5614 index = 0;
5615 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5616 found_uncached_bg = false;
5617 loop++;
5618 if (!ideal_cache_percent)
5619 goto search;
5620
5621 /*
5622 * One of the following two things has happened so far:
5623 *
5624 * 1) We found an ideal block group for caching that
5625 * is mostly full and will cache quickly, so we might
5626 * as well wait for it.
5627 *
5628 * 2) We searched for cached only and we didn't find
5629 * anything, and we didn't start any caching kthreads
5630 * either, so chances are we will loop through and
5631 * start a couple caching kthreads, and then come back
5632 * around and just wait for them. This will be slower
5633 * because we will have 2 caching kthreads reading at
5634 * the same time when we could have just started one
5635 * and waited for it to get far enough to give us an
5636 * allocation, so go ahead and go to the wait caching
5637 * loop.
5638 */
5639 loop = LOOP_CACHING_WAIT;
5640 search_start = ideal_cache_offset;
5641 ideal_cache_percent = 0;
5642 goto ideal_cache;
5643 } else if (loop == LOOP_FIND_IDEAL) {
5644 /*
5645 * Didn't find an uncached bg, wait on anything we find
5646 * next.
5647 */
5648 loop = LOOP_CACHING_WAIT;
5649 goto search;
5650 }
5651
5652 loop++;
5653
5654 if (loop == LOOP_ALLOC_CHUNK) {
5655 if (allowed_chunk_alloc) {
5656 ret = do_chunk_alloc(trans, root, num_bytes +
5657 2 * 1024 * 1024, data,
5658 CHUNK_ALLOC_LIMITED);
5659 allowed_chunk_alloc = 0;
5660 if (ret == 1)
5661 done_chunk_alloc = 1;
5662 } else if (!done_chunk_alloc &&
5663 space_info->force_alloc ==
5664 CHUNK_ALLOC_NO_FORCE) {
5665 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5666 }
5667
5668 /*
5669 * We didn't allocate a chunk, go ahead and drop the
5670 * empty size and loop again.
5671 */
5672 if (!done_chunk_alloc)
5673 loop = LOOP_NO_EMPTY_SIZE;
5674 }
5675
5676 if (loop == LOOP_NO_EMPTY_SIZE) {
5677 empty_size = 0;
5678 empty_cluster = 0;
5679 }
5680
5681 goto search;
5682 } else if (!ins->objectid) {
5683 ret = -ENOSPC;
5684 } else {
5685 ret = 0;
5686 }
5687
5688 return ret;
5689 }
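
/*
 * To make the retry ladder above easier to follow, here is a rough
 * sketch of the control flow (illustrative only, not literal code; the
 * real transitions also depend on found_uncached_bg, allowed_chunk_alloc
 * and done_chunk_alloc):
 *
 *	loop = LOOP_FIND_IDEAL;
 *	while (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 *		try_every_raid_index();	   (hypothetical helper name)
 *		loop++;	   FIND_IDEAL -> CACHING_NOWAIT -> CACHING_WAIT
 *			   -> ALLOC_CHUNK -> NO_EMPTY_SIZE
 *	}
 *
 * each pass relaxes one constraint: first stop favouring cached block
 * groups, then wait on caching, then force a chunk allocation, and
 * finally retry with empty_size and empty_cluster forced to 0.
 */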
5690
5691 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5692 int dump_block_groups)
5693 {
5694 struct btrfs_block_group_cache *cache;
5695 int index = 0;
5696
5697 spin_lock(&info->lock);
5698 printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
5699 (unsigned long long)info->flags,
5700 (unsigned long long)(info->total_bytes - info->bytes_used -
5701 info->bytes_pinned - info->bytes_reserved -
5702 info->bytes_readonly),
5703 (info->full) ? "" : "not ");
5704 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5705 "reserved=%llu, may_use=%llu, readonly=%llu\n",
5706 (unsigned long long)info->total_bytes,
5707 (unsigned long long)info->bytes_used,
5708 (unsigned long long)info->bytes_pinned,
5709 (unsigned long long)info->bytes_reserved,
5710 (unsigned long long)info->bytes_may_use,
5711 (unsigned long long)info->bytes_readonly);
5712 spin_unlock(&info->lock);
5713
5714 if (!dump_block_groups)
5715 return;
5716
5717 down_read(&info->groups_sem);
5718 again:
5719 list_for_each_entry(cache, &info->block_groups[index], list) {
5720 spin_lock(&cache->lock);
5721 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5722 "%llu pinned %llu reserved\n",
5723 (unsigned long long)cache->key.objectid,
5724 (unsigned long long)cache->key.offset,
5725 (unsigned long long)btrfs_block_group_used(&cache->item),
5726 (unsigned long long)cache->pinned,
5727 (unsigned long long)cache->reserved);
5728 btrfs_dump_free_space(cache, bytes);
5729 spin_unlock(&cache->lock);
5730 }
5731 if (++index < BTRFS_NR_RAID_TYPES)
5732 goto again;
5733 up_read(&info->groups_sem);
5734 }
5735
5736 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5737 struct btrfs_root *root,
5738 u64 num_bytes, u64 min_alloc_size,
5739 u64 empty_size, u64 hint_byte,
5740 u64 search_end, struct btrfs_key *ins,
5741 u64 data)
5742 {
5743 int ret;
5744 u64 search_start = 0;
5745
5746 data = btrfs_get_alloc_profile(root, data);
5747 again:
5748 /*
5749 * the only place that sets empty_size is btrfs_realloc_node, which
5750 * is not called recursively on allocations
5751 */
5752 if (empty_size || root->ref_cows)
5753 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5754 num_bytes + 2 * 1024 * 1024, data,
5755 CHUNK_ALLOC_NO_FORCE);
5756
5757 WARN_ON(num_bytes < root->sectorsize);
5758 ret = find_free_extent(trans, root, num_bytes, empty_size,
5759 search_start, search_end, hint_byte,
5760 ins, data);
5761
5762 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5763 num_bytes = num_bytes >> 1;
5764 num_bytes = num_bytes & ~(root->sectorsize - 1);
5765 num_bytes = max(num_bytes, min_alloc_size);
5766 do_chunk_alloc(trans, root->fs_info->extent_root,
5767 num_bytes, data, CHUNK_ALLOC_FORCE);
5768 goto again;
5769 }
5770 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5771 struct btrfs_space_info *sinfo;
5772
5773 sinfo = __find_space_info(root->fs_info, data);
5774 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5775 "wanted %llu\n", (unsigned long long)data,
5776 (unsigned long long)num_bytes);
5777 dump_space_info(sinfo, num_bytes, 1);
5778 }
5779
5780 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
5781
5782 return ret;
5783 }
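
/*
 * A worked example of the -ENOSPC fallback above, with hypothetical
 * numbers: sectorsize 4096, min_alloc_size 64K, initial request 1M.
 * Each retry halves the request, rounds it down to a sector boundary
 * and clamps it to min_alloc_size, forcing a chunk allocation first:
 *
 *	1M -> 512K -> 256K -> 128K -> 64K (== min_alloc_size, last try)
 *
 * so the allocator degrades gracefully instead of failing the whole
 * allocation on the first -ENOSPC.
 */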
5784
5785 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
5786 u64 start, u64 len, int pin)
5787 {
5788 struct btrfs_block_group_cache *cache;
5789 int ret = 0;
5790
5791 cache = btrfs_lookup_block_group(root->fs_info, start);
5792 if (!cache) {
5793 printk(KERN_ERR "Unable to find block group for %llu\n",
5794 (unsigned long long)start);
5795 return -ENOSPC;
5796 }
5797
5798 if (btrfs_test_opt(root, DISCARD))
5799 ret = btrfs_discard_extent(root, start, len, NULL);
5800
5801 if (pin)
5802 pin_down_extent(root, cache, start, len, 1);
5803 else {
5804 btrfs_add_free_space(cache, start, len);
5805 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
5806 }
5807 btrfs_put_block_group(cache);
5808
5809 trace_btrfs_reserved_extent_free(root, start, len);
5810
5811 return ret;
5812 }
5813
5814 int btrfs_free_reserved_extent(struct btrfs_root *root,
5815 u64 start, u64 len)
5816 {
5817 return __btrfs_free_reserved_extent(root, start, len, 0);
5818 }
5819
5820 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
5821 u64 start, u64 len)
5822 {
5823 return __btrfs_free_reserved_extent(root, start, len, 1);
5824 }
5825
5826 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5827 struct btrfs_root *root,
5828 u64 parent, u64 root_objectid,
5829 u64 flags, u64 owner, u64 offset,
5830 struct btrfs_key *ins, int ref_mod)
5831 {
5832 int ret;
5833 struct btrfs_fs_info *fs_info = root->fs_info;
5834 struct btrfs_extent_item *extent_item;
5835 struct btrfs_extent_inline_ref *iref;
5836 struct btrfs_path *path;
5837 struct extent_buffer *leaf;
5838 int type;
5839 u32 size;
5840
5841 if (parent > 0)
5842 type = BTRFS_SHARED_DATA_REF_KEY;
5843 else
5844 type = BTRFS_EXTENT_DATA_REF_KEY;
5845
5846 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5847
5848 path = btrfs_alloc_path();
5849 if (!path)
5850 return -ENOMEM;
5851
5852 path->leave_spinning = 1;
5853 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5854 ins, size);
5855 BUG_ON(ret);
5856
5857 leaf = path->nodes[0];
5858 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5859 struct btrfs_extent_item);
5860 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5861 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5862 btrfs_set_extent_flags(leaf, extent_item,
5863 flags | BTRFS_EXTENT_FLAG_DATA);
5864
5865 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5866 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5867 if (parent > 0) {
5868 struct btrfs_shared_data_ref *ref;
5869 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5870 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5871 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5872 } else {
5873 struct btrfs_extent_data_ref *ref;
5874 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5875 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5876 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5877 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5878 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5879 }
5880
5881 btrfs_mark_buffer_dirty(path->nodes[0]);
5882 btrfs_free_path(path);
5883
5884 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5885 if (ret) {
5886 printk(KERN_ERR "btrfs update block group failed for %llu "
5887 "%llu\n", (unsigned long long)ins->objectid,
5888 (unsigned long long)ins->offset);
5889 BUG();
5890 }
5891 return ret;
5892 }
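
/*
 * For reference, the leaf item built above packs the back reference
 * inline right behind the extent item, so the bytes look roughly like
 * one of (sketch):
 *
 *	[btrfs_extent_item][iref SHARED_DATA_REF_KEY][btrfs_shared_data_ref]
 *	[btrfs_extent_item][iref EXTENT_DATA_REF_KEY][btrfs_extent_data_ref]
 *
 * which is why the item size is sizeof(*extent_item) +
 * btrfs_extent_inline_ref_size(type).
 */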
5893
5894 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5895 struct btrfs_root *root,
5896 u64 parent, u64 root_objectid,
5897 u64 flags, struct btrfs_disk_key *key,
5898 int level, struct btrfs_key *ins)
5899 {
5900 int ret;
5901 struct btrfs_fs_info *fs_info = root->fs_info;
5902 struct btrfs_extent_item *extent_item;
5903 struct btrfs_tree_block_info *block_info;
5904 struct btrfs_extent_inline_ref *iref;
5905 struct btrfs_path *path;
5906 struct extent_buffer *leaf;
5907 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5908
5909 path = btrfs_alloc_path();
5910 if (!path)
5911 return -ENOMEM;
5912
5913 path->leave_spinning = 1;
5914 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5915 ins, size);
5916 BUG_ON(ret);
5917
5918 leaf = path->nodes[0];
5919 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5920 struct btrfs_extent_item);
5921 btrfs_set_extent_refs(leaf, extent_item, 1);
5922 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5923 btrfs_set_extent_flags(leaf, extent_item,
5924 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5925 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5926
5927 btrfs_set_tree_block_key(leaf, block_info, key);
5928 btrfs_set_tree_block_level(leaf, block_info, level);
5929
5930 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5931 if (parent > 0) {
5932 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5933 btrfs_set_extent_inline_ref_type(leaf, iref,
5934 BTRFS_SHARED_BLOCK_REF_KEY);
5935 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5936 } else {
5937 btrfs_set_extent_inline_ref_type(leaf, iref,
5938 BTRFS_TREE_BLOCK_REF_KEY);
5939 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5940 }
5941
5942 btrfs_mark_buffer_dirty(leaf);
5943 btrfs_free_path(path);
5944
5945 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5946 if (ret) {
5947 printk(KERN_ERR "btrfs update block group failed for %llu "
5948 "%llu\n", (unsigned long long)ins->objectid,
5949 (unsigned long long)ins->offset);
5950 BUG();
5951 }
5952 return ret;
5953 }
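
/*
 * Compared with the data case above, tree blocks carry an extra
 * btrfs_tree_block_info (the first key and the level) before the
 * inline ref, roughly (sketch):
 *
 *	[btrfs_extent_item][btrfs_tree_block_info][iref SHARED_BLOCK_REF_KEY
 *						    or TREE_BLOCK_REF_KEY]
 *
 * matching size = sizeof(*extent_item) + sizeof(*block_info) +
 * sizeof(*iref) above.
 */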
5954
5955 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5956 struct btrfs_root *root,
5957 u64 root_objectid, u64 owner,
5958 u64 offset, struct btrfs_key *ins)
5959 {
5960 int ret;
5961
5962 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5963
5964 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
5965 ins->offset, 0,
5966 root_objectid, owner, offset,
5967 BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
5968 return ret;
5969 }
5970
5971 /*
5972 * this is used by the tree logging recovery code. It records that
5973 * an extent has been allocated and makes sure to clear the free
5974 * space cache bits as well
5975 */
5976 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5977 struct btrfs_root *root,
5978 u64 root_objectid, u64 owner, u64 offset,
5979 struct btrfs_key *ins)
5980 {
5981 int ret;
5982 struct btrfs_block_group_cache *block_group;
5983 struct btrfs_caching_control *caching_ctl;
5984 u64 start = ins->objectid;
5985 u64 num_bytes = ins->offset;
5986
5987 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5988 cache_block_group(block_group, trans, NULL, 0);
5989 caching_ctl = get_caching_control(block_group);
5990
5991 if (!caching_ctl) {
5992 BUG_ON(!block_group_cache_done(block_group));
5993 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5994 BUG_ON(ret);
5995 } else {
5996 mutex_lock(&caching_ctl->mutex);
5997
5998 if (start >= caching_ctl->progress) {
5999 ret = add_excluded_extent(root, start, num_bytes);
6000 BUG_ON(ret);
6001 } else if (start + num_bytes <= caching_ctl->progress) {
6002 ret = btrfs_remove_free_space(block_group,
6003 start, num_bytes);
6004 BUG_ON(ret);
6005 } else {
6006 num_bytes = caching_ctl->progress - start;
6007 ret = btrfs_remove_free_space(block_group,
6008 start, num_bytes);
6009 BUG_ON(ret);
6010
6011 start = caching_ctl->progress;
6012 num_bytes = ins->objectid + ins->offset -
6013 caching_ctl->progress;
6014 ret = add_excluded_extent(root, start, num_bytes);
6015 BUG_ON(ret);
6016 }
6017
6018 mutex_unlock(&caching_ctl->mutex);
6019 put_caching_control(caching_ctl);
6020 }
6021
6022 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
6023 RESERVE_ALLOC_NO_ACCOUNT);
6024 BUG_ON(ret);
6025 btrfs_put_block_group(block_group);
6026 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
6027 0, owner, offset, ins, 1);
6028 return ret;
6029 }
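
/*
 * The three branches above split the logged extent against the caching
 * progress point. With hypothetical offsets and progress == 100:
 *
 *	extent [120, 140): entirely ahead of progress, only
 *			   add_excluded_extent()
 *	extent [ 40,  60): entirely behind progress, only
 *			   btrfs_remove_free_space()
 *	extent [ 90, 110): straddles progress, remove [90, 100) from the
 *			   free space cache and exclude [100, 110)
 */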
6030
6031 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
6032 struct btrfs_root *root,
6033 u64 bytenr, u32 blocksize,
6034 int level)
6035 {
6036 struct extent_buffer *buf;
6037
6038 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
6039 if (!buf)
6040 return ERR_PTR(-ENOMEM);
6041 btrfs_set_header_generation(buf, trans->transid);
6042 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
6043 btrfs_tree_lock(buf);
6044 clean_tree_block(trans, root, buf);
6045
6046 btrfs_set_lock_blocking(buf);
6047 btrfs_set_buffer_uptodate(buf);
6048
6049 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
6050 /*
6051 * we allow two log transactions at a time, use different
6052 * EXENT bit to differentiate dirty pages.
6053 */
6054 if (root->log_transid % 2 == 0)
6055 set_extent_dirty(&root->dirty_log_pages, buf->start,
6056 buf->start + buf->len - 1, GFP_NOFS);
6057 else
6058 set_extent_new(&root->dirty_log_pages, buf->start,
6059 buf->start + buf->len - 1, GFP_NOFS);
6060 } else {
6061 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
6062 buf->start + buf->len - 1, GFP_NOFS);
6063 }
6064 trans->blocks_used++;
6065 /* this returns a buffer locked for blocking */
6066 return buf;
6067 }
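
/*
 * As an example of the even/odd trick above: a buffer allocated while
 * log_transid is 4 gets its range marked with set_extent_dirty(), while
 * log_transid 5 would use set_extent_new(), so the two log transactions
 * that may be in flight at once never see each other's dirty pages.
 */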
6068
6069 static struct btrfs_block_rsv *
6070 use_block_rsv(struct btrfs_trans_handle *trans,
6071 struct btrfs_root *root, u32 blocksize)
6072 {
6073 struct btrfs_block_rsv *block_rsv;
6074 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6075 int ret;
6076
6077 block_rsv = get_block_rsv(trans, root);
6078
6079 if (block_rsv->size == 0) {
6080 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6081 /*
6082 * If we couldn't reserve metadata bytes try and use some from
6083 * the global reserve.
6084 */
6085 if (ret && block_rsv != global_rsv) {
6086 ret = block_rsv_use_bytes(global_rsv, blocksize);
6087 if (!ret)
6088 return global_rsv;
6089 return ERR_PTR(ret);
6090 } else if (ret) {
6091 return ERR_PTR(ret);
6092 }
6093 return block_rsv;
6094 }
6095
6096 ret = block_rsv_use_bytes(block_rsv, blocksize);
6097 if (!ret)
6098 return block_rsv;
6099 if (ret) {
6100 static DEFINE_RATELIMIT_STATE(_rs,
6101 DEFAULT_RATELIMIT_INTERVAL,
6102 /*DEFAULT_RATELIMIT_BURST*/ 2);
6103 if (__ratelimit(&_rs)) {
6104 printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
6105 WARN_ON(1);
6106 }
6107 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
6108 if (!ret) {
6109 return block_rsv;
6110 } else if (ret && block_rsv != global_rsv) {
6111 ret = block_rsv_use_bytes(global_rsv, blocksize);
6112 if (!ret)
6113 return global_rsv;
6114 }
6115 }
6116
6117 return ERR_PTR(-ENOSPC);
6118 }
6119
6120 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
6121 {
6122 block_rsv_add_bytes(block_rsv, blocksize, 0);
6123 block_rsv_release_bytes(block_rsv, NULL, 0);
6124 }
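
/*
 * Taking use_block_rsv() and unuse_block_rsv() together, the fallback
 * order for a new tree block is roughly (sketch, error paths elided):
 *
 *	rsv = get_block_rsv(trans, root);
 *	if (block_rsv_use_bytes(rsv, blocksize))	   rsv exhausted
 *		if (reserve_metadata_bytes(root, rsv, blocksize, 0))
 *			block_rsv_use_bytes(global_rsv, blocksize);
 *
 * i.e. the per-root/transaction reserve first, then a fresh metadata
 * reservation, then the global reserve as a last resort.
 */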
6125
6126 /*
6127 * finds a free extent and does all the dirty work required for allocation.
6128 * returns the key for the extent through ins, and a tree buffer for
6129 * the first block of the extent through buf.
6130 *
6131 * returns the tree buffer or an ERR_PTR on failure.
6132 */
6133 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6134 struct btrfs_root *root, u32 blocksize,
6135 u64 parent, u64 root_objectid,
6136 struct btrfs_disk_key *key, int level,
6137 u64 hint, u64 empty_size, int for_cow)
6138 {
6139 struct btrfs_key ins;
6140 struct btrfs_block_rsv *block_rsv;
6141 struct extent_buffer *buf;
6142 u64 flags = 0;
6143 int ret;
6144
6146 block_rsv = use_block_rsv(trans, root, blocksize);
6147 if (IS_ERR(block_rsv))
6148 return ERR_CAST(block_rsv);
6149
6150 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
6151 empty_size, hint, (u64)-1, &ins, 0);
6152 if (ret) {
6153 unuse_block_rsv(block_rsv, blocksize);
6154 return ERR_PTR(ret);
6155 }
6156
6157 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
6158 blocksize, level);
6159 BUG_ON(IS_ERR(buf));
6160
6161 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
6162 if (parent == 0)
6163 parent = ins.objectid;
6164 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
6165 } else
6166 BUG_ON(parent > 0);
6167
6168 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
6169 struct btrfs_delayed_extent_op *extent_op;
6170 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
6171 BUG_ON(!extent_op);
6172 if (key)
6173 memcpy(&extent_op->key, key, sizeof(extent_op->key));
6174 else
6175 memset(&extent_op->key, 0, sizeof(extent_op->key));
6176 extent_op->flags_to_set = flags;
6177 extent_op->update_key = 1;
6178 extent_op->update_flags = 1;
6179 extent_op->is_data = 0;
6180
6181 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6182 ins.objectid,
6183 ins.offset, parent, root_objectid,
6184 level, BTRFS_ADD_DELAYED_EXTENT,
6185 extent_op, for_cow);
6186 BUG_ON(ret);
6187 }
6188 return buf;
6189 }
6190
6191 struct walk_control {
6192 u64 refs[BTRFS_MAX_LEVEL];
6193 u64 flags[BTRFS_MAX_LEVEL];
6194 struct btrfs_key update_progress;
6195 int stage;
6196 int level;
6197 int shared_level;
6198 int update_ref;
6199 int keep_locks;
6200 int reada_slot;
6201 int reada_count;
6202 int for_reloc;
6203 };
6204
6205 #define DROP_REFERENCE 1
6206 #define UPDATE_BACKREF 2
6207
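
/*
 * A sketch of how the two stages above cooperate while deleting a
 * snapshot: the walk starts in DROP_REFERENCE; when do_walk_down()
 * meets a shared block whose backrefs need updating, it switches to
 * UPDATE_BACKREF and remembers the level in wc->shared_level, the
 * subtree is walked converting refs, and walk_up_proc() switches back
 * once the walk climbs above the shared level:
 *
 *	DROP_REFERENCE --(shared block, update_ref)--> UPDATE_BACKREF
 *	UPDATE_BACKREF --(back above wc->shared_level)--> DROP_REFERENCE
 */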
6208 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
6209 struct btrfs_root *root,
6210 struct walk_control *wc,
6211 struct btrfs_path *path)
6212 {
6213 u64 bytenr;
6214 u64 generation;
6215 u64 refs;
6216 u64 flags;
6217 u32 nritems;
6218 u32 blocksize;
6219 struct btrfs_key key;
6220 struct extent_buffer *eb;
6221 int ret;
6222 int slot;
6223 int nread = 0;
6224
6225 if (path->slots[wc->level] < wc->reada_slot) {
6226 wc->reada_count = wc->reada_count * 2 / 3;
6227 wc->reada_count = max(wc->reada_count, 2);
6228 } else {
6229 wc->reada_count = wc->reada_count * 3 / 2;
6230 wc->reada_count = min_t(int, wc->reada_count,
6231 BTRFS_NODEPTRS_PER_BLOCK(root));
6232 }
6233
6234 eb = path->nodes[wc->level];
6235 nritems = btrfs_header_nritems(eb);
6236 blocksize = btrfs_level_size(root, wc->level - 1);
6237
6238 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
6239 if (nread >= wc->reada_count)
6240 break;
6241
6242 cond_resched();
6243 bytenr = btrfs_node_blockptr(eb, slot);
6244 generation = btrfs_node_ptr_generation(eb, slot);
6245
6246 if (slot == path->slots[wc->level])
6247 goto reada;
6248
6249 if (wc->stage == UPDATE_BACKREF &&
6250 generation <= root->root_key.offset)
6251 continue;
6252
6253 /* We don't lock the tree block, it's OK to be racy here */
6254 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6255 &refs, &flags);
6256 BUG_ON(ret);
6257 BUG_ON(refs == 0);
6258
6259 if (wc->stage == DROP_REFERENCE) {
6260 if (refs == 1)
6261 goto reada;
6262
6263 if (wc->level == 1 &&
6264 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6265 continue;
6266 if (!wc->update_ref ||
6267 generation <= root->root_key.offset)
6268 continue;
6269 btrfs_node_key_to_cpu(eb, &key, slot);
6270 ret = btrfs_comp_cpu_keys(&key,
6271 &wc->update_progress);
6272 if (ret < 0)
6273 continue;
6274 } else {
6275 if (wc->level == 1 &&
6276 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6277 continue;
6278 }
6279 reada:
6280 ret = readahead_tree_block(root, bytenr, blocksize,
6281 generation);
6282 if (ret)
6283 break;
6284 nread++;
6285 }
6286 wc->reada_slot = slot;
6287 }
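
/*
 * The readahead window above adapts multiplicatively: e.g. starting
 * from a hypothetical reada_count of 32, consecutive forward passes
 * grow it 32 -> 48 -> 72 -> ... capped at BTRFS_NODEPTRS_PER_BLOCK(),
 * while seeking backwards shrinks it 32 -> 21 -> 14 -> ... with a
 * floor of 2.
 */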
6288
6289 /*
6290 * helper to process tree block while walking down the tree.
6291 *
6292 * when wc->stage == UPDATE_BACKREF, this function updates
6293 * back refs for pointers in the block.
6294 *
6295 * NOTE: return value 1 means we should stop walking down.
6296 */
6297 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6298 struct btrfs_root *root,
6299 struct btrfs_path *path,
6300 struct walk_control *wc, int lookup_info)
6301 {
6302 int level = wc->level;
6303 struct extent_buffer *eb = path->nodes[level];
6304 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6305 int ret;
6306
6307 if (wc->stage == UPDATE_BACKREF &&
6308 btrfs_header_owner(eb) != root->root_key.objectid)
6309 return 1;
6310
6311 /*
6312 * when reference count of tree block is 1, it won't increase
6313 * again. once full backref flag is set, we never clear it.
6314 */
6315 if (lookup_info &&
6316 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
6317 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
6318 BUG_ON(!path->locks[level]);
6319 ret = btrfs_lookup_extent_info(trans, root,
6320 eb->start, eb->len,
6321 &wc->refs[level],
6322 &wc->flags[level]);
6323 BUG_ON(ret);
6324 BUG_ON(wc->refs[level] == 0);
6325 }
6326
6327 if (wc->stage == DROP_REFERENCE) {
6328 if (wc->refs[level] > 1)
6329 return 1;
6330
6331 if (path->locks[level] && !wc->keep_locks) {
6332 btrfs_tree_unlock_rw(eb, path->locks[level]);
6333 path->locks[level] = 0;
6334 }
6335 return 0;
6336 }
6337
6338 /* wc->stage == UPDATE_BACKREF */
6339 if (!(wc->flags[level] & flag)) {
6340 BUG_ON(!path->locks[level]);
6341 ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
6342 BUG_ON(ret);
6343 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6344 BUG_ON(ret);
6345 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6346 eb->len, flag, 0);
6347 BUG_ON(ret);
6348 wc->flags[level] |= flag;
6349 }
6350
6351 /*
6352 * the block is shared by multiple trees, so it's not good to
6353 * keep the tree lock
6354 */
6355 if (path->locks[level] && level > 0) {
6356 btrfs_tree_unlock_rw(eb, path->locks[level]);
6357 path->locks[level] = 0;
6358 }
6359 return 0;
6360 }
6361
6362 /*
6363 * helper to process tree block pointer.
6364 *
6365 * when wc->stage == DROP_REFERENCE, this function checks
6366 * reference count of the block pointed to. if the block
6367 * is shared and we need update back refs for the subtree
6368 * rooted at the block, this function changes wc->stage to
6369 * UPDATE_BACKREF. if the block is shared and there is no
6370 * need to update back, this function drops the reference
6371 * to the block.
6372 *
6373 * NOTE: return value 1 means we should stop walking down.
6374 */
6375 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
6376 struct btrfs_root *root,
6377 struct btrfs_path *path,
6378 struct walk_control *wc, int *lookup_info)
6379 {
6380 u64 bytenr;
6381 u64 generation;
6382 u64 parent;
6383 u32 blocksize;
6384 struct btrfs_key key;
6385 struct extent_buffer *next;
6386 int level = wc->level;
6387 int reada = 0;
6388 int ret = 0;
6389
6390 generation = btrfs_node_ptr_generation(path->nodes[level],
6391 path->slots[level]);
6392 /*
6393 * if the lower level block was created before the snapshot
6394 * was created, we know there is no need to update back refs
6395 * for the subtree
6396 */
6397 if (wc->stage == UPDATE_BACKREF &&
6398 generation <= root->root_key.offset) {
6399 *lookup_info = 1;
6400 return 1;
6401 }
6402
6403 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
6404 blocksize = btrfs_level_size(root, level - 1);
6405
6406 next = btrfs_find_tree_block(root, bytenr, blocksize);
6407 if (!next) {
6408 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
6409 if (!next)
6410 return -ENOMEM;
6411 reada = 1;
6412 }
6413 btrfs_tree_lock(next);
6414 btrfs_set_lock_blocking(next);
6415
6416 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
6417 &wc->refs[level - 1],
6418 &wc->flags[level - 1]);
6419 BUG_ON(ret);
6420 BUG_ON(wc->refs[level - 1] == 0);
6421 *lookup_info = 0;
6422
6423 if (wc->stage == DROP_REFERENCE) {
6424 if (wc->refs[level - 1] > 1) {
6425 if (level == 1 &&
6426 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6427 goto skip;
6428
6429 if (!wc->update_ref ||
6430 generation <= root->root_key.offset)
6431 goto skip;
6432
6433 btrfs_node_key_to_cpu(path->nodes[level], &key,
6434 path->slots[level]);
6435 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6436 if (ret < 0)
6437 goto skip;
6438
6439 wc->stage = UPDATE_BACKREF;
6440 wc->shared_level = level - 1;
6441 }
6442 } else {
6443 if (level == 1 &&
6444 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6445 goto skip;
6446 }
6447
6448 if (!btrfs_buffer_uptodate(next, generation)) {
6449 btrfs_tree_unlock(next);
6450 free_extent_buffer(next);
6451 next = NULL;
6452 *lookup_info = 1;
6453 }
6454
6455 if (!next) {
6456 if (reada && level == 1)
6457 reada_walk_down(trans, root, wc, path);
6458 next = read_tree_block(root, bytenr, blocksize, generation);
6459 if (!next)
6460 return -EIO;
6461 btrfs_tree_lock(next);
6462 btrfs_set_lock_blocking(next);
6463 }
6464
6465 level--;
6466 BUG_ON(level != btrfs_header_level(next));
6467 path->nodes[level] = next;
6468 path->slots[level] = 0;
6469 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6470 wc->level = level;
6471 if (wc->level == 1)
6472 wc->reada_slot = 0;
6473 return 0;
6474 skip:
6475 wc->refs[level - 1] = 0;
6476 wc->flags[level - 1] = 0;
6477 if (wc->stage == DROP_REFERENCE) {
6478 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6479 parent = path->nodes[level]->start;
6480 } else {
6481 BUG_ON(root->root_key.objectid !=
6482 btrfs_header_owner(path->nodes[level]));
6483 parent = 0;
6484 }
6485
6486 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6487 root->root_key.objectid, level - 1, 0, 0);
6488 BUG_ON(ret);
6489 }
6490 btrfs_tree_unlock(next);
6491 free_extent_buffer(next);
6492 *lookup_info = 1;
6493 return 1;
6494 }
6495
6496 /*
6497 * helper to process tree block while walking up the tree.
6498 *
6499 * when wc->stage == DROP_REFERENCE, this function drops
6500 * reference count on the block.
6501 *
6502 * when wc->stage == UPDATE_BACKREF, this function changes
6503 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6504 * to UPDATE_BACKREF previously while processing the block.
6505 *
6506 * NOTE: return value 1 means we should stop walking up.
6507 */
6508 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6509 struct btrfs_root *root,
6510 struct btrfs_path *path,
6511 struct walk_control *wc)
6512 {
6513 int ret;
6514 int level = wc->level;
6515 struct extent_buffer *eb = path->nodes[level];
6516 u64 parent = 0;
6517
6518 if (wc->stage == UPDATE_BACKREF) {
6519 BUG_ON(wc->shared_level < level);
6520 if (level < wc->shared_level)
6521 goto out;
6522
6523 ret = find_next_key(path, level + 1, &wc->update_progress);
6524 if (ret > 0)
6525 wc->update_ref = 0;
6526
6527 wc->stage = DROP_REFERENCE;
6528 wc->shared_level = -1;
6529 path->slots[level] = 0;
6530
6531 /*
6532 * check reference count again if the block isn't locked.
6533 * we should start walking down the tree again if reference
6534 * count is one.
6535 */
6536 if (!path->locks[level]) {
6537 BUG_ON(level == 0);
6538 btrfs_tree_lock(eb);
6539 btrfs_set_lock_blocking(eb);
6540 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6541
6542 ret = btrfs_lookup_extent_info(trans, root,
6543 eb->start, eb->len,
6544 &wc->refs[level],
6545 &wc->flags[level]);
6546 BUG_ON(ret);
6547 BUG_ON(wc->refs[level] == 0);
6548 if (wc->refs[level] == 1) {
6549 btrfs_tree_unlock_rw(eb, path->locks[level]);
6550 return 1;
6551 }
6552 }
6553 }
6554
6555 /* wc->stage == DROP_REFERENCE */
6556 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6557
6558 if (wc->refs[level] == 1) {
6559 if (level == 0) {
6560 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6561 ret = btrfs_dec_ref(trans, root, eb, 1,
6562 wc->for_reloc);
6563 else
6564 ret = btrfs_dec_ref(trans, root, eb, 0,
6565 wc->for_reloc);
6566 BUG_ON(ret);
6567 }
6568 /* make the locked-block assertion in clean_tree_block happy */
6569 if (!path->locks[level] &&
6570 btrfs_header_generation(eb) == trans->transid) {
6571 btrfs_tree_lock(eb);
6572 btrfs_set_lock_blocking(eb);
6573 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6574 }
6575 clean_tree_block(trans, root, eb);
6576 }
6577
6578 if (eb == root->node) {
6579 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6580 parent = eb->start;
6581 else
6582 BUG_ON(root->root_key.objectid !=
6583 btrfs_header_owner(eb));
6584 } else {
6585 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6586 parent = path->nodes[level + 1]->start;
6587 else
6588 BUG_ON(root->root_key.objectid !=
6589 btrfs_header_owner(path->nodes[level + 1]));
6590 }
6591
6592 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
6593 out:
6594 wc->refs[level] = 0;
6595 wc->flags[level] = 0;
6596 return 0;
6597 }
6598
6599 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6600 struct btrfs_root *root,
6601 struct btrfs_path *path,
6602 struct walk_control *wc)
6603 {
6604 int level = wc->level;
6605 int lookup_info = 1;
6606 int ret;
6607
6608 while (level >= 0) {
6609 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6610 if (ret > 0)
6611 break;
6612
6613 if (level == 0)
6614 break;
6615
6616 if (path->slots[level] >=
6617 btrfs_header_nritems(path->nodes[level]))
6618 break;
6619
6620 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6621 if (ret > 0) {
6622 path->slots[level]++;
6623 continue;
6624 } else if (ret < 0)
6625 return ret;
6626 level = wc->level;
6627 }
6628 return 0;
6629 }
6630
6631 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6632 struct btrfs_root *root,
6633 struct btrfs_path *path,
6634 struct walk_control *wc, int max_level)
6635 {
6636 int level = wc->level;
6637 int ret;
6638
6639 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6640 while (level < max_level && path->nodes[level]) {
6641 wc->level = level;
6642 if (path->slots[level] + 1 <
6643 btrfs_header_nritems(path->nodes[level])) {
6644 path->slots[level]++;
6645 return 0;
6646 } else {
6647 ret = walk_up_proc(trans, root, path, wc);
6648 if (ret > 0)
6649 return 0;
6650
6651 if (path->locks[level]) {
6652 btrfs_tree_unlock_rw(path->nodes[level],
6653 path->locks[level]);
6654 path->locks[level] = 0;
6655 }
6656 free_extent_buffer(path->nodes[level]);
6657 path->nodes[level] = NULL;
6658 level++;
6659 }
6660 }
6661 return 1;
6662 }
6663
6664 /*
6665 * drop a subvolume tree.
6666 *
6667 * this function traverses the tree freeing any blocks that are only
6668 * referenced by the tree.
6669 *
6670 * when a shared tree block is found, this function decreases its
6671 * reference count by one. if update_ref is true, this function
6672 * also makes sure backrefs for the shared block and all lower level
6673 * blocks are properly updated.
6674 */
6675 void btrfs_drop_snapshot(struct btrfs_root *root,
6676 struct btrfs_block_rsv *block_rsv, int update_ref,
6677 int for_reloc)
6678 {
6679 struct btrfs_path *path;
6680 struct btrfs_trans_handle *trans;
6681 struct btrfs_root *tree_root = root->fs_info->tree_root;
6682 struct btrfs_root_item *root_item = &root->root_item;
6683 struct walk_control *wc;
6684 struct btrfs_key key;
6685 int err = 0;
6686 int ret;
6687 int level;
6688
6689 path = btrfs_alloc_path();
6690 if (!path) {
6691 err = -ENOMEM;
6692 goto out;
6693 }
6694
6695 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6696 if (!wc) {
6697 btrfs_free_path(path);
6698 err = -ENOMEM;
6699 goto out;
6700 }
6701
6702 trans = btrfs_start_transaction(tree_root, 0);
6703 BUG_ON(IS_ERR(trans));
6704
6705 if (block_rsv)
6706 trans->block_rsv = block_rsv;
6707
6708 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6709 level = btrfs_header_level(root->node);
6710 path->nodes[level] = btrfs_lock_root_node(root);
6711 btrfs_set_lock_blocking(path->nodes[level]);
6712 path->slots[level] = 0;
6713 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6714 memset(&wc->update_progress, 0,
6715 sizeof(wc->update_progress));
6716 } else {
6717 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6718 memcpy(&wc->update_progress, &key,
6719 sizeof(wc->update_progress));
6720
6721 level = root_item->drop_level;
6722 BUG_ON(level == 0);
6723 path->lowest_level = level;
6724 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6725 path->lowest_level = 0;
6726 if (ret < 0) {
6727 err = ret;
6728 goto out_free;
6729 }
6730 WARN_ON(ret > 0);
6731
6732 /*
6733 * unlock our path, this is safe because only this
6734 * function is allowed to delete this snapshot
6735 */
6736 btrfs_unlock_up_safe(path, 0);
6737
6738 level = btrfs_header_level(root->node);
6739 while (1) {
6740 btrfs_tree_lock(path->nodes[level]);
6741 btrfs_set_lock_blocking(path->nodes[level]);
6742
6743 ret = btrfs_lookup_extent_info(trans, root,
6744 path->nodes[level]->start,
6745 path->nodes[level]->len,
6746 &wc->refs[level],
6747 &wc->flags[level]);
6748 BUG_ON(ret);
6749 BUG_ON(wc->refs[level] == 0);
6750
6751 if (level == root_item->drop_level)
6752 break;
6753
6754 btrfs_tree_unlock(path->nodes[level]);
6755 WARN_ON(wc->refs[level] != 1);
6756 level--;
6757 }
6758 }
6759
6760 wc->level = level;
6761 wc->shared_level = -1;
6762 wc->stage = DROP_REFERENCE;
6763 wc->update_ref = update_ref;
6764 wc->keep_locks = 0;
6765 wc->for_reloc = for_reloc;
6766 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6767
6768 while (1) {
6769 ret = walk_down_tree(trans, root, path, wc);
6770 if (ret < 0) {
6771 err = ret;
6772 break;
6773 }
6774
6775 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6776 if (ret < 0) {
6777 err = ret;
6778 break;
6779 }
6780
6781 if (ret > 0) {
6782 BUG_ON(wc->stage != DROP_REFERENCE);
6783 break;
6784 }
6785
6786 if (wc->stage == DROP_REFERENCE) {
6787 level = wc->level;
6788 btrfs_node_key(path->nodes[level],
6789 &root_item->drop_progress,
6790 path->slots[level]);
6791 root_item->drop_level = level;
6792 }
6793
6794 BUG_ON(wc->level == 0);
6795 if (btrfs_should_end_transaction(trans, tree_root)) {
6796 ret = btrfs_update_root(trans, tree_root,
6797 &root->root_key,
6798 root_item);
6799 BUG_ON(ret);
6800
6801 btrfs_end_transaction_throttle(trans, tree_root);
6802 trans = btrfs_start_transaction(tree_root, 0);
6803 BUG_ON(IS_ERR(trans));
6804 if (block_rsv)
6805 trans->block_rsv = block_rsv;
6806 }
6807 }
6808 btrfs_release_path(path);
6809 BUG_ON(err);
6810
6811 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6812 BUG_ON(ret);
6813
6814 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6815 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6816 NULL, NULL);
6817 BUG_ON(ret < 0);
6818 if (ret > 0) {
6819 /* if we fail to delete the orphan item this time
6820 * around, it'll get picked up the next time.
6821 *
6822 * The most common failure here is just -ENOENT.
6823 */
6824 btrfs_del_orphan_item(trans, tree_root,
6825 root->root_key.objectid);
6826 }
6827 }
6828
6829 if (root->in_radix) {
6830 btrfs_free_fs_root(tree_root->fs_info, root);
6831 } else {
6832 free_extent_buffer(root->node);
6833 free_extent_buffer(root->commit_root);
6834 kfree(root);
6835 }
6836 out_free:
6837 btrfs_end_transaction_throttle(trans, tree_root);
6838 kfree(wc);
6839 btrfs_free_path(path);
6840 out:
6841 if (err)
6842 btrfs_std_error(root->fs_info, err);
6843 return;
6844 }
6845
6846 /*
6847 * drop subtree rooted at tree block 'node'.
6848 *
6849 * NOTE: this function will unlock and release tree block 'node'.
6850 * only used by relocation code.
6851 */
6852 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6853 struct btrfs_root *root,
6854 struct extent_buffer *node,
6855 struct extent_buffer *parent)
6856 {
6857 struct btrfs_path *path;
6858 struct walk_control *wc;
6859 int level;
6860 int parent_level;
6861 int ret = 0;
6862 int wret;
6863
6864 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6865
6866 path = btrfs_alloc_path();
6867 if (!path)
6868 return -ENOMEM;
6869
6870 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6871 if (!wc) {
6872 btrfs_free_path(path);
6873 return -ENOMEM;
6874 }
6875
6876 btrfs_assert_tree_locked(parent);
6877 parent_level = btrfs_header_level(parent);
6878 extent_buffer_get(parent);
6879 path->nodes[parent_level] = parent;
6880 path->slots[parent_level] = btrfs_header_nritems(parent);
6881
6882 btrfs_assert_tree_locked(node);
6883 level = btrfs_header_level(node);
6884 path->nodes[level] = node;
6885 path->slots[level] = 0;
6886 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
6887
6888 wc->refs[parent_level] = 1;
6889 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6890 wc->level = level;
6891 wc->shared_level = -1;
6892 wc->stage = DROP_REFERENCE;
6893 wc->update_ref = 0;
6894 wc->keep_locks = 1;
6895 wc->for_reloc = 1;
6896 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6897
6898 while (1) {
6899 wret = walk_down_tree(trans, root, path, wc);
6900 if (wret < 0) {
6901 ret = wret;
6902 break;
6903 }
6904
6905 wret = walk_up_tree(trans, root, path, wc, parent_level);
6906 if (wret < 0)
6907 ret = wret;
6908 if (wret != 0)
6909 break;
6910 }
6911
6912 kfree(wc);
6913 btrfs_free_path(path);
6914 return ret;
6915 }
6916
6917 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
6918 {
6919 u64 num_devices;
6920 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
6921 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
6922
6923 if (root->fs_info->balance_ctl) {
6924 struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
6925 u64 tgt = 0;
6926
6927 /* pick restriper's target profile and return */
6928 if (flags & BTRFS_BLOCK_GROUP_DATA &&
6929 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6930 tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
6931 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
6932 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6933 tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
6934 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
6935 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
6936 tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
6937 }
6938
6939 if (tgt) {
6940 /* extended -> chunk profile */
6941 tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
6942 return tgt;
6943 }
6944 }
6945
6946 /*
6947 * we add in the count of missing devices because we want
6948 * to make sure that any RAID levels on a degraded FS
6949 * continue to be honored.
6950 */
6951 num_devices = root->fs_info->fs_devices->rw_devices +
6952 root->fs_info->fs_devices->missing_devices;
6953
6954 if (num_devices == 1) {
6955 stripped |= BTRFS_BLOCK_GROUP_DUP;
6956 stripped = flags & ~stripped;
6957
6958 /* turn raid0 into single device chunks */
6959 if (flags & BTRFS_BLOCK_GROUP_RAID0)
6960 return stripped;
6961
6962 /* turn mirroring into duplication */
6963 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
6964 BTRFS_BLOCK_GROUP_RAID10))
6965 return stripped | BTRFS_BLOCK_GROUP_DUP;
6966 return flags;
6967 } else {
6968 /* they already had raid on here, just return */
6969 if (flags & stripped)
6970 return flags;
6971
6972 stripped |= BTRFS_BLOCK_GROUP_DUP;
6973 stripped = flags & ~stripped;
6974
6975 /* switch duplicated blocks with raid1 */
6976 if (flags & BTRFS_BLOCK_GROUP_DUP)
6977 return stripped | BTRFS_BLOCK_GROUP_RAID1;
6978
6979 /* turn single device chunks into raid0 */
6980 return stripped | BTRFS_BLOCK_GROUP_RAID0;
6981 }
6982 return flags;
6983 }
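
/*
 * Leaving the restriper target aside, the conversions above reduce to
 * this (illustrative) table:
 *
 *	1 usable device:   RAID0 -> single, RAID1/RAID10 -> DUP
 *	2+ usable devices: DUP -> RAID1, single -> RAID0,
 *			   existing RAID0/1/10 profiles kept as-is
 */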
6984
6985 static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
6986 {
6987 struct btrfs_space_info *sinfo = cache->space_info;
6988 u64 num_bytes;
6989 u64 min_allocable_bytes;
6990 int ret = -ENOSPC;
6991
6993 /*
6994 * We need some metadata space and system metadata space for
6995 * allocating chunks in some corner cases, unless we are forced
6996 * to set the group read-only.
6997 */
6998 if ((sinfo->flags &
6999 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
7000 !force)
7001 min_allocable_bytes = 1 * 1024 * 1024;
7002 else
7003 min_allocable_bytes = 0;
7004
7005 spin_lock(&sinfo->lock);
7006 spin_lock(&cache->lock);
7007
7008 if (cache->ro) {
7009 ret = 0;
7010 goto out;
7011 }
7012
7013 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7014 cache->bytes_super - btrfs_block_group_used(&cache->item);
7015
7016 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
7017 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
7018 min_allocable_bytes <= sinfo->total_bytes) {
7019 sinfo->bytes_readonly += num_bytes;
7020 cache->ro = 1;
7021 ret = 0;
7022 }
7023 out:
7024 spin_unlock(&cache->lock);
7025 spin_unlock(&sinfo->lock);
7026 return ret;
7027 }
7028
7029 int btrfs_set_block_group_ro(struct btrfs_root *root,
7030 struct btrfs_block_group_cache *cache)
7031
7032 {
7033 struct btrfs_trans_handle *trans;
7034 u64 alloc_flags;
7035 int ret;
7036
7037 BUG_ON(cache->ro);
7038
7039 trans = btrfs_join_transaction(root);
7040 BUG_ON(IS_ERR(trans));
7041
7042 alloc_flags = update_block_group_flags(root, cache->flags);
7043 if (alloc_flags != cache->flags)
7044 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7045 CHUNK_ALLOC_FORCE);
7046
7047 ret = set_block_group_ro(cache, 0);
7048 if (!ret)
7049 goto out;
7050 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
7051 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7052 CHUNK_ALLOC_FORCE);
7053 if (ret < 0)
7054 goto out;
7055 ret = set_block_group_ro(cache, 0);
7056 out:
7057 btrfs_end_transaction(trans, root);
7058 return ret;
7059 }
7060
7061 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
7062 struct btrfs_root *root, u64 type)
7063 {
7064 u64 alloc_flags = get_alloc_profile(root, type);
7065 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
7066 CHUNK_ALLOC_FORCE);
7067 }
7068
7069 /*
7070 * helper to account the unused space of all the readonly block groups in the
7071 * list. takes mirrors into account.
7072 */
7073 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
7074 {
7075 struct btrfs_block_group_cache *block_group;
7076 u64 free_bytes = 0;
7077 int factor;
7078
7079 list_for_each_entry(block_group, groups_list, list) {
7080 spin_lock(&block_group->lock);
7081
7082 if (!block_group->ro) {
7083 spin_unlock(&block_group->lock);
7084 continue;
7085 }
7086
7087 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
7088 BTRFS_BLOCK_GROUP_RAID10 |
7089 BTRFS_BLOCK_GROUP_DUP))
7090 factor = 2;
7091 else
7092 factor = 1;
7093
7094 free_bytes += (block_group->key.offset -
7095 btrfs_block_group_used(&block_group->item)) *
7096 factor;
7097
7098 spin_unlock(&block_group->lock);
7099 }
7100
7101 return free_bytes;
7102 }
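
/*
 * Example of the factor above, with hypothetical sizes: a read-only
 * RAID1 block group spanning 1G with 256M used contributes
 * (1G - 256M) * 2 = 1.5G of raw free space, since every byte in it is
 * stored twice on disk.
 */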
7103
7104 /*
7105 * helper to account the unused space of all the readonly block groups in the
7106 * space_info. takes mirrors into account.
7107 */
7108 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
7109 {
7110 int i;
7111 u64 free_bytes = 0;
7112
7113 spin_lock(&sinfo->lock);
7114
7115 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
7116 if (!list_empty(&sinfo->block_groups[i]))
7117 free_bytes += __btrfs_get_ro_block_group_free_space(
7118 &sinfo->block_groups[i]);
7119
7120 spin_unlock(&sinfo->lock);
7121
7122 return free_bytes;
7123 }
7124
7125 int btrfs_set_block_group_rw(struct btrfs_root *root,
7126 struct btrfs_block_group_cache *cache)
7127 {
7128 struct btrfs_space_info *sinfo = cache->space_info;
7129 u64 num_bytes;
7130
7131 BUG_ON(!cache->ro);
7132
7133 spin_lock(&sinfo->lock);
7134 spin_lock(&cache->lock);
7135 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
7136 cache->bytes_super - btrfs_block_group_used(&cache->item);
7137 sinfo->bytes_readonly -= num_bytes;
7138 cache->ro = 0;
7139 spin_unlock(&cache->lock);
7140 spin_unlock(&sinfo->lock);
7141 return 0;
7142 }
7143
7144 /*
7145 * checks to see if it's even possible to relocate this block group.
7146 *
7147 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7148 * ok to go ahead and try.
7149 */
7150 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7151 {
7152 struct btrfs_block_group_cache *block_group;
7153 struct btrfs_space_info *space_info;
7154 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7155 struct btrfs_device *device;
7156 u64 min_free;
7157 u64 dev_min = 1;
7158 u64 dev_nr = 0;
7159 int index;
7160 int full = 0;
7161 int ret = 0;
7162
7163 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7164
7165 /* odd, couldn't find the block group, leave it alone */
7166 if (!block_group)
7167 return -1;
7168
7169 min_free = btrfs_block_group_used(&block_group->item);
7170
7171 /* no bytes used, we're good */
7172 if (!min_free)
7173 goto out;
7174
7175 space_info = block_group->space_info;
7176 spin_lock(&space_info->lock);
7177
7178 full = space_info->full;
7179
7180 /*
7181 * if this is the last block group we have in this space, we can't
7182 * relocate it unless we're able to allocate a new chunk below.
7183 *
7184 * Otherwise, we need to make sure we have room in the space to handle
7185 * all of the extents from this block group. If we can, we're good
7186 */
7187 if ((space_info->total_bytes != block_group->key.offset) &&
7188 (space_info->bytes_used + space_info->bytes_reserved +
7189 space_info->bytes_pinned + space_info->bytes_readonly +
7190 min_free < space_info->total_bytes)) {
7191 spin_unlock(&space_info->lock);
7192 goto out;
7193 }
7194 spin_unlock(&space_info->lock);
7195
7196 /*
7197 * ok we don't have enough space, but maybe we have free space on our
7198 * devices to allocate new chunks for relocation, so loop through our
7199 * alloc devices and guess if we have enough space. However, if we
7200 * were marked as full, then we know there aren't enough chunks, and we
7201 * can just return.
7202 */
7203 ret = -1;
7204 if (full)
7205 goto out;
7206
7207 /*
7208 * index:
7209 * 0: raid10
7210 * 1: raid1
7211 * 2: dup
7212 * 3: raid0
7213 * 4: single
7214 */
7215 index = get_block_group_index(block_group);
7216 if (index == 0) {
7217 dev_min = 4;
7218 /* Divide by 2 */
7219 min_free >>= 1;
7220 } else if (index == 1) {
7221 dev_min = 2;
7222 } else if (index == 2) {
7223 /* Multiply by 2 */
7224 min_free <<= 1;
7225 } else if (index == 3) {
7226 dev_min = fs_devices->rw_devices;
7227 do_div(min_free, dev_min);
7228 }
7229
7230 mutex_lock(&root->fs_info->chunk_mutex);
7231 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7232 u64 dev_offset;
7233
7234 /*
7235 * check to make sure we can actually find a chunk with enough
7236 * space to fit our block group in.
7237 */
7238 if (device->total_bytes > device->bytes_used + min_free) {
7239 ret = find_free_dev_extent(device, min_free,
7240 &dev_offset, NULL);
7241 if (!ret)
7242 dev_nr++;
7243
7244 if (dev_nr >= dev_min)
7245 break;
7246
7247 ret = -1;
7248 }
7249 }
7250 mutex_unlock(&root->fs_info->chunk_mutex);
7251 out:
7252 btrfs_put_block_group(block_group);
7253 return ret;
7254 }
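
/*
 * A worked example of the per-profile tuning above, with a hypothetical
 * 512M of used bytes: for RAID10 (index 0) min_free is halved to 256M
 * and we need that much free on at least 4 writable devices; for RAID1
 * (index 1) it is the full 512M on 2 devices; for DUP (index 2) it is
 * doubled to 1G on a single device; and for RAID0 (index 3) the 512M is
 * divided evenly across all rw devices.
 */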
7255
7256 static int find_first_block_group(struct btrfs_root *root,
7257 struct btrfs_path *path, struct btrfs_key *key)
7258 {
7259 int ret = 0;
7260 struct btrfs_key found_key;
7261 struct extent_buffer *leaf;
7262 int slot;
7263
7264 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7265 if (ret < 0)
7266 goto out;
7267
7268 while (1) {
7269 slot = path->slots[0];
7270 leaf = path->nodes[0];
7271 if (slot >= btrfs_header_nritems(leaf)) {
7272 ret = btrfs_next_leaf(root, path);
7273 if (ret == 0)
7274 continue;
7275 if (ret < 0)
7276 goto out;
7277 break;
7278 }
7279 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7280
7281 if (found_key.objectid >= key->objectid &&
7282 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7283 ret = 0;
7284 goto out;
7285 }
7286 path->slots[0]++;
7287 }
7288 out:
7289 return ret;
7290 }
7291
7292 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
7293 {
7294 struct btrfs_block_group_cache *block_group;
7295 u64 last = 0;
7296
7297 while (1) {
7298 struct inode *inode;
7299
7300 block_group = btrfs_lookup_first_block_group(info, last);
7301 while (block_group) {
7302 spin_lock(&block_group->lock);
7303 if (block_group->iref)
7304 break;
7305 spin_unlock(&block_group->lock);
7306 block_group = next_block_group(info->tree_root,
7307 block_group);
7308 }
7309 if (!block_group) {
7310 if (last == 0)
7311 break;
7312 last = 0;
7313 continue;
7314 }
7315
7316 inode = block_group->inode;
7317 block_group->iref = 0;
7318 block_group->inode = NULL;
7319 spin_unlock(&block_group->lock);
7320 iput(inode);
7321 last = block_group->key.objectid + block_group->key.offset;
7322 btrfs_put_block_group(block_group);
7323 }
7324 }
7325
7326 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7327 {
7328 struct btrfs_block_group_cache *block_group;
7329 struct btrfs_space_info *space_info;
7330 struct btrfs_caching_control *caching_ctl;
7331 struct rb_node *n;
7332
7333 down_write(&info->extent_commit_sem);
7334 while (!list_empty(&info->caching_block_groups)) {
7335 caching_ctl = list_entry(info->caching_block_groups.next,
7336 struct btrfs_caching_control, list);
7337 list_del(&caching_ctl->list);
7338 put_caching_control(caching_ctl);
7339 }
7340 up_write(&info->extent_commit_sem);
7341
7342 spin_lock(&info->block_group_cache_lock);
7343 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7344 block_group = rb_entry(n, struct btrfs_block_group_cache,
7345 cache_node);
7346 rb_erase(&block_group->cache_node,
7347 &info->block_group_cache_tree);
7348 spin_unlock(&info->block_group_cache_lock);
7349
7350 down_write(&block_group->space_info->groups_sem);
7351 list_del(&block_group->list);
7352 up_write(&block_group->space_info->groups_sem);
7353
7354 if (block_group->cached == BTRFS_CACHE_STARTED)
7355 wait_block_group_cache_done(block_group);
7356
7357 /*
7358 * We haven't cached this block group, which means we could
7359 * possibly have excluded extents on this block group.
7360 */
7361 if (block_group->cached == BTRFS_CACHE_NO)
7362 free_excluded_extents(info->extent_root, block_group);
7363
7364 btrfs_remove_free_space_cache(block_group);
7365 btrfs_put_block_group(block_group);
7366
7367 spin_lock(&info->block_group_cache_lock);
7368 }
7369 spin_unlock(&info->block_group_cache_lock);
7370
7371 /* now that all the block groups are freed, go through and
7372 * free all the space_info structs. This is only called during
7373 * the final stages of unmount, and so we know nobody is
7374 * using them. We call synchronize_rcu() once before we start,
7375 * just to be on the safe side.
7376 */
7377 synchronize_rcu();
7378
7379 release_global_block_rsv(info);
7380
7381 while (!list_empty(&info->space_info)) {
7382 space_info = list_entry(info->space_info.next,
7383 struct btrfs_space_info,
7384 list);
7385 if (space_info->bytes_pinned > 0 ||
7386 space_info->bytes_reserved > 0 ||
7387 space_info->bytes_may_use > 0) {
7388 WARN_ON(1);
7389 dump_space_info(space_info, 0, 0);
7390 }
7391 list_del(&space_info->list);
7392 kfree(space_info);
7393 }
7394 return 0;
7395 }
7396
7397 static void __link_block_group(struct btrfs_space_info *space_info,
7398 struct btrfs_block_group_cache *cache)
7399 {
7400 int index = get_block_group_index(cache);
7401
7402 down_write(&space_info->groups_sem);
7403 list_add_tail(&cache->list, &space_info->block_groups[index]);
7404 up_write(&space_info->groups_sem);
7405 }
7406
7407 int btrfs_read_block_groups(struct btrfs_root *root)
7408 {
7409 struct btrfs_path *path;
7410 int ret;
7411 struct btrfs_block_group_cache *cache;
7412 struct btrfs_fs_info *info = root->fs_info;
7413 struct btrfs_space_info *space_info;
7414 struct btrfs_key key;
7415 struct btrfs_key found_key;
7416 struct extent_buffer *leaf;
7417 int need_clear = 0;
7418 u64 cache_gen;
7419
7420 root = info->extent_root;
7421 key.objectid = 0;
7422 key.offset = 0;
7423 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7424 path = btrfs_alloc_path();
7425 if (!path)
7426 return -ENOMEM;
7427 path->reada = 1;
7428
7429 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
7430 if (btrfs_test_opt(root, SPACE_CACHE) &&
7431 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
7432 need_clear = 1;
7433 if (btrfs_test_opt(root, CLEAR_CACHE))
7434 need_clear = 1;
7435
7436 while (1) {
7437 ret = find_first_block_group(root, path, &key);
7438 if (ret > 0)
7439 break;
7440 if (ret != 0)
7441 goto error;
7442 leaf = path->nodes[0];
7443 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7444 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7445 if (!cache) {
7446 ret = -ENOMEM;
7447 goto error;
7448 }
7449 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7450 GFP_NOFS);
7451 if (!cache->free_space_ctl) {
7452 kfree(cache);
7453 ret = -ENOMEM;
7454 goto error;
7455 }
7456
7457 atomic_set(&cache->count, 1);
7458 spin_lock_init(&cache->lock);
7459 cache->fs_info = info;
7460 INIT_LIST_HEAD(&cache->list);
7461 INIT_LIST_HEAD(&cache->cluster_list);
7462
7463 if (need_clear)
7464 cache->disk_cache_state = BTRFS_DC_CLEAR;
7465
7466 read_extent_buffer(leaf, &cache->item,
7467 btrfs_item_ptr_offset(leaf, path->slots[0]),
7468 sizeof(cache->item));
7469 memcpy(&cache->key, &found_key, sizeof(found_key));
7470
7471 key.objectid = found_key.objectid + found_key.offset;
7472 btrfs_release_path(path);
7473 cache->flags = btrfs_block_group_flags(&cache->item);
7474 cache->sectorsize = root->sectorsize;
7475
7476 btrfs_init_free_space_ctl(cache);
7477
7478 /*
7479 * We need to exclude the super stripes now so that the space
7480 * info has super bytes accounted for, otherwise we'll think
7481 * we have more space than we actually do.
7482 */
7483 exclude_super_stripes(root, cache);
7484
7485 /*
7486 * check for two cases, either we are full, and therefore
7487 * don't need to bother with the caching work since we won't
7488 * find any space, or we are empty, and we can just add all
7489 * the space in and be done with it. This saves us a lot of
7490 * time, particularly in the full case.
7491 */
7492 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7493 cache->last_byte_to_unpin = (u64)-1;
7494 cache->cached = BTRFS_CACHE_FINISHED;
7495 free_excluded_extents(root, cache);
7496 } else if (btrfs_block_group_used(&cache->item) == 0) {
7497 cache->last_byte_to_unpin = (u64)-1;
7498 cache->cached = BTRFS_CACHE_FINISHED;
7499 add_new_free_space(cache, root->fs_info,
7500 found_key.objectid,
7501 found_key.objectid +
7502 found_key.offset);
7503 free_excluded_extents(root, cache);
7504 }
7505
7506 ret = update_space_info(info, cache->flags, found_key.offset,
7507 btrfs_block_group_used(&cache->item),
7508 &space_info);
7509 BUG_ON(ret);
7510 cache->space_info = space_info;
7511 spin_lock(&cache->space_info->lock);
7512 cache->space_info->bytes_readonly += cache->bytes_super;
7513 spin_unlock(&cache->space_info->lock);
7514
7515 __link_block_group(space_info, cache);
7516
7517 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7518 BUG_ON(ret);
7519
7520 set_avail_alloc_bits(root->fs_info, cache->flags);
7521 if (btrfs_chunk_readonly(root, cache->key.objectid))
7522 set_block_group_ro(cache, 1);
7523 }
7524
7525 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7526 if (!(get_alloc_profile(root, space_info->flags) &
7527 (BTRFS_BLOCK_GROUP_RAID10 |
7528 BTRFS_BLOCK_GROUP_RAID1 |
7529 BTRFS_BLOCK_GROUP_DUP)))
7530 continue;
7531 /*
7532 * avoid allocating from un-mirrored block groups if there are
7533 * mirrored block groups.
7534 */
7535 list_for_each_entry(cache, &space_info->block_groups[3], list)
7536 set_block_group_ro(cache, 1);
7537 list_for_each_entry(cache, &space_info->block_groups[4], list)
7538 set_block_group_ro(cache, 1);
7539 }
7540
7541 init_global_block_rsv(info);
7542 ret = 0;
7543 error:
7544 btrfs_free_path(path);
7545 return ret;
7546 }
7547
7548 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7549 struct btrfs_root *root, u64 bytes_used,
7550 u64 type, u64 chunk_objectid, u64 chunk_offset,
7551 u64 size)
7552 {
7553 int ret;
7554 struct btrfs_root *extent_root;
7555 struct btrfs_block_group_cache *cache;
7556
7557 extent_root = root->fs_info->extent_root;
7558
7559 root->fs_info->last_trans_log_full_commit = trans->transid;
7560
7561 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7562 if (!cache)
7563 return -ENOMEM;
7564 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
7565 GFP_NOFS);
7566 if (!cache->free_space_ctl) {
7567 kfree(cache);
7568 return -ENOMEM;
7569 }
7570
7571 cache->key.objectid = chunk_offset;
7572 cache->key.offset = size;
7573 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7574 cache->sectorsize = root->sectorsize;
7575 cache->fs_info = root->fs_info;
7576
7577 atomic_set(&cache->count, 1);
7578 spin_lock_init(&cache->lock);
7579 INIT_LIST_HEAD(&cache->list);
7580 INIT_LIST_HEAD(&cache->cluster_list);
7581
7582 btrfs_init_free_space_ctl(cache);
7583
7584 btrfs_set_block_group_used(&cache->item, bytes_used);
7585 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7586 cache->flags = type;
7587 btrfs_set_block_group_flags(&cache->item, type);
7588
7589 cache->last_byte_to_unpin = (u64)-1;
7590 cache->cached = BTRFS_CACHE_FINISHED;
7591 exclude_super_stripes(root, cache);
7592
7593 add_new_free_space(cache, root->fs_info, chunk_offset,
7594 chunk_offset + size);
7595
7596 free_excluded_extents(root, cache);
7597
7598 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7599 &cache->space_info);
7600 BUG_ON(ret);
7601 update_global_block_rsv(root->fs_info);
7602
7603 spin_lock(&cache->space_info->lock);
7604 cache->space_info->bytes_readonly += cache->bytes_super;
7605 spin_unlock(&cache->space_info->lock);
7606
7607 __link_block_group(cache->space_info, cache);
7608
7609 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7610 BUG_ON(ret);
7611
7612 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7613 sizeof(cache->item));
7614 BUG_ON(ret);
7615
7616 set_avail_alloc_bits(extent_root->fs_info, type);
7617
7618 return 0;
7619 }
7620
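/*
 * The inverse of set_avail_alloc_bits(): clear the profile bits in
 * @flags from the per-type avail_*_alloc_bits masks.  Called when the
 * last block group with a given type and profile goes away.
 */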
7621 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
7622 {
7623 u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
7624
7625 /* chunk -> extended profile */
7626 if (extra_flags == 0)
7627 extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
7628
7629 if (flags & BTRFS_BLOCK_GROUP_DATA)
7630 fs_info->avail_data_alloc_bits &= ~extra_flags;
7631 if (flags & BTRFS_BLOCK_GROUP_METADATA)
7632 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
7633 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
7634 fs_info->avail_system_alloc_bits &= ~extra_flags;
7635 }
7636
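/*
 * Tear down a block group that has already been marked read-only and
 * drained: drop its free space cache inode, unhook it from the block
 * group cache tree and its space_info, and delete the block group
 * item itself from the extent tree.
 */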
7637 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7638 struct btrfs_root *root, u64 group_start)
7639 {
7640 struct btrfs_path *path;
7641 struct btrfs_block_group_cache *block_group;
7642 struct btrfs_free_cluster *cluster;
7643 struct btrfs_root *tree_root = root->fs_info->tree_root;
7644 struct btrfs_key key;
7645 struct inode *inode;
7646 int ret;
7647 int index;
7648 int factor;
7649
7650 root = root->fs_info->extent_root;
7651
7652 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7653 BUG_ON(!block_group);
7654 BUG_ON(!block_group->ro);
7655
7656 /*
7657 * Free the reserved super bytes from this block group before
7658 * removing it.
7659 */
7660 free_excluded_extents(root, block_group);
7661
7662 memcpy(&key, &block_group->key, sizeof(key));
7663 index = get_block_group_index(block_group);
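/*
 * DUP, RAID1 and RAID10 keep two copies of every byte, so their
 * on-disk footprint is twice the logical size; remember that as
 * factor for the disk_total accounting below.
 */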
7664 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
7665 BTRFS_BLOCK_GROUP_RAID1 |
7666 BTRFS_BLOCK_GROUP_RAID10))
7667 factor = 2;
7668 else
7669 factor = 1;
7670
7671 /* make sure this block group isn't part of an allocation cluster */
7672 cluster = &root->fs_info->data_alloc_cluster;
7673 spin_lock(&cluster->refill_lock);
7674 btrfs_return_cluster_to_free_space(block_group, cluster);
7675 spin_unlock(&cluster->refill_lock);
7676
7677 /*
7678 * make sure this block group isn't part of a metadata
7679 * allocation cluster
7680 */
7681 cluster = &root->fs_info->meta_alloc_cluster;
7682 spin_lock(&cluster->refill_lock);
7683 btrfs_return_cluster_to_free_space(block_group, cluster);
7684 spin_unlock(&cluster->refill_lock);
7685
7686 path = btrfs_alloc_path();
7687 if (!path) {
7688 ret = -ENOMEM;
7689 goto out;
7690 }
7691
7692 inode = lookup_free_space_inode(tree_root, block_group, path);
7693 if (!IS_ERR(inode)) {
7694 ret = btrfs_orphan_add(trans, inode);
7695 BUG_ON(ret);
7696 clear_nlink(inode);
7697 /* One for the block group's ref */
7698 spin_lock(&block_group->lock);
7699 if (block_group->iref) {
7700 block_group->iref = 0;
7701 block_group->inode = NULL;
7702 spin_unlock(&block_group->lock);
7703 iput(inode);
7704 } else {
7705 spin_unlock(&block_group->lock);
7706 }
7707 /* One for our lookup ref */
7708 btrfs_add_delayed_iput(inode);
7709 }
7710
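/*
 * The free space cache item for this group lives at key
 * (BTRFS_FREE_SPACE_OBJECTID, 0, group start); delete it if it is
 * present.
 */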
7711 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
7712 key.offset = block_group->key.objectid;
7713 key.type = 0;
7714
7715 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
7716 if (ret < 0)
7717 goto out;
7718 if (ret > 0)
7719 btrfs_release_path(path);
7720 if (ret == 0) {
7721 ret = btrfs_del_item(trans, tree_root, path);
7722 if (ret)
7723 goto out;
7724 btrfs_release_path(path);
7725 }
7726
7727 spin_lock(&root->fs_info->block_group_cache_lock);
7728 rb_erase(&block_group->cache_node,
7729 &root->fs_info->block_group_cache_tree);
7730 spin_unlock(&root->fs_info->block_group_cache_lock);
7731
7732 down_write(&block_group->space_info->groups_sem);
7733 /*
7734 * We must use list_del_init so callers can check whether they
7735 * are still on the list after taking the semaphore.
7736 */
7737 list_del_init(&block_group->list);
7738 if (list_empty(&block_group->space_info->block_groups[index]))
7739 clear_avail_alloc_bits(root->fs_info, block_group->flags);
7740 up_write(&block_group->space_info->groups_sem);
7741
7742 if (block_group->cached == BTRFS_CACHE_STARTED)
7743 wait_block_group_cache_done(block_group);
7744
7745 btrfs_remove_free_space_cache(block_group);
7746
7747 spin_lock(&block_group->space_info->lock);
7748 block_group->space_info->total_bytes -= block_group->key.offset;
7749 block_group->space_info->bytes_readonly -= block_group->key.offset;
7750 block_group->space_info->disk_total -= block_group->key.offset * factor;
7751 spin_unlock(&block_group->space_info->lock);
7752
7753 memcpy(&key, &block_group->key, sizeof(key));
7754
7755 btrfs_clear_space_info_full(root->fs_info);
7756
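/*
 * Two puts: one for the ref btrfs_lookup_block_group() took above,
 * and one for the ref the block group cache tree held before we did
 * the rb_erase().
 */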
7757 btrfs_put_block_group(block_group);
7758 btrfs_put_block_group(block_group);
7759
7760 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7761 if (ret > 0)
7762 ret = -EIO;
7763 if (ret < 0)
7764 goto out;
7765
7766 ret = btrfs_del_item(trans, root, path);
7767 out:
7768 btrfs_free_path(path);
7769 return ret;
7770 }
7771
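/*
 * Pre-create the space_info entries we know we will need, so the
 * ENOSPC accounting works before any block groups exist: SYSTEM plus
 * either a combined METADATA|DATA entry or separate METADATA and DATA
 * entries, depending on the MIXED_GROUPS incompat feature.
 */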
7772 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
7773 {
7774 struct btrfs_space_info *space_info;
7775 struct btrfs_super_block *disk_super;
7776 u64 features;
7777 u64 flags;
7778 int mixed = 0;
7779 int ret;
7780
7781 disk_super = fs_info->super_copy;
7782 if (!btrfs_super_root(disk_super))
7783 return 1;
7784
7785 features = btrfs_super_incompat_flags(disk_super);
7786 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
7787 mixed = 1;
7788
7789 flags = BTRFS_BLOCK_GROUP_SYSTEM;
7790 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7791 if (ret)
7792 goto out;
7793
7794 if (mixed) {
7795 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
7796 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7797 } else {
7798 flags = BTRFS_BLOCK_GROUP_METADATA;
7799 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7800 if (ret)
7801 goto out;
7802
7803 flags = BTRFS_BLOCK_GROUP_DATA;
7804 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
7805 }
7806 out:
7807 return ret;
7808 }
7809
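/*
 * Thin error-path wrappers, presumably for the error handling paths
 * elsewhere (disk-io.c's cleanup code, for instance), so they can
 * unpin and discard extent ranges without reaching into the helpers
 * above.
 */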
7810 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
7811 {
7812 return unpin_extent_range(root, start, end);
7813 }
7814
7815 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
7816 u64 num_bytes, u64 *actual_bytes)
7817 {
7818 return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
7819 }
7820
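/*
 * Back end of the FITRIM ioctl.  An illustrative caller (the values
 * here are hypothetical) would look roughly like this:
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= (u64)-1,
 *		.minlen	= 4096,
 *	};
 *	ret = btrfs_trim_fs(root, &range);
 *
 * which trims free space across the whole filesystem while skipping
 * free runs smaller than 4K.  We walk every block group overlapping
 * [start, start + len), trim the free space in each, and return the
 * total number of bytes trimmed in range->len.
 */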
7821 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
7822 {
7823 struct btrfs_fs_info *fs_info = root->fs_info;
7824 struct btrfs_block_group_cache *cache = NULL;
7825 u64 group_trimmed;
7826 u64 start;
7827 u64 end;
7828 u64 trimmed = 0;
7829 int ret = 0;
7830
7831 cache = btrfs_lookup_block_group(fs_info, range->start);
7832
7833 while (cache) {
7834 if (cache->key.objectid >= (range->start + range->len)) {
7835 btrfs_put_block_group(cache);
7836 break;
7837 }
7838
7839 start = max(range->start, cache->key.objectid);
7840 end = min(range->start + range->len,
7841 cache->key.objectid + cache->key.offset);
7842
7843 if (end - start >= range->minlen) {
7844 if (!block_group_cache_done(cache)) {
7845 ret = cache_block_group(cache, NULL, root, 0);
7846 if (!ret)
7847 wait_block_group_cache_done(cache);
7848 }
7849 ret = btrfs_trim_block_group(cache,
7850 &group_trimmed,
7851 start,
7852 end,
7853 range->minlen);
7854
7855 trimmed += group_trimmed;
7856 if (ret) {
7857 btrfs_put_block_group(cache);
7858 break;
7859 }
7860 }
7861
7862 cache = next_block_group(fs_info->tree_root, cache);
7863 }
7864
7865 range->len = trimmed;
7866 return ret;
7867 }