Btrfs: Kill init_btrfs_i()
fs/btrfs/extent-tree.c
1/*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18#include <linux/sched.h>
19#include <linux/pagemap.h>
20#include <linux/writeback.h>
21#include <linux/blkdev.h>
22#include <linux/sort.h>
23#include <linux/rcupdate.h>
24#include <linux/kthread.h>
25#include <linux/slab.h>
26#include "compat.h"
27#include "hash.h"
28#include "ctree.h"
29#include "disk-io.h"
30#include "print-tree.h"
31#include "transaction.h"
32#include "volumes.h"
33#include "locking.h"
34#include "free-space-cache.h"
35
36static int update_block_group(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 u64 bytenr, u64 num_bytes, int alloc,
39 int mark_free);
40static int update_reserved_extents(struct btrfs_block_group_cache *cache,
41 u64 num_bytes, int reserve);
42static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
43 struct btrfs_root *root,
44 u64 bytenr, u64 num_bytes, u64 parent,
45 u64 root_objectid, u64 owner_objectid,
46 u64 owner_offset, int refs_to_drop,
47 struct btrfs_delayed_extent_op *extra_op);
48static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
49 struct extent_buffer *leaf,
50 struct btrfs_extent_item *ei);
51static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
52 struct btrfs_root *root,
53 u64 parent, u64 root_objectid,
54 u64 flags, u64 owner, u64 offset,
55 struct btrfs_key *ins, int ref_mod);
56static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
57 struct btrfs_root *root,
58 u64 parent, u64 root_objectid,
59 u64 flags, struct btrfs_disk_key *key,
60 int level, struct btrfs_key *ins);
61static int do_chunk_alloc(struct btrfs_trans_handle *trans,
62 struct btrfs_root *extent_root, u64 alloc_bytes,
63 u64 flags, int force);
64static int pin_down_bytes(struct btrfs_trans_handle *trans,
65 struct btrfs_root *root,
66 struct btrfs_path *path,
67 u64 bytenr, u64 num_bytes,
68 int is_data, int reserved,
69 struct extent_buffer **must_clean);
70static int find_next_key(struct btrfs_path *path, int level,
71 struct btrfs_key *key);
72static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
73 int dump_block_groups);
74static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
75 struct btrfs_root *root,
76 struct btrfs_space_info *sinfo, u64 num_bytes);
77static int shrink_delalloc(struct btrfs_trans_handle *trans,
78 struct btrfs_root *root,
79 struct btrfs_space_info *sinfo, u64 to_reclaim);
80
81static noinline int
82block_group_cache_done(struct btrfs_block_group_cache *cache)
83{
84 smp_mb();
85 return cache->cached == BTRFS_CACHE_FINISHED;
86}
87
88static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
89{
90 return (cache->flags & bits) == bits;
91}
92
93void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
94{
95 atomic_inc(&cache->count);
96}
97
98void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
99{
100 if (atomic_dec_and_test(&cache->count))
101 kfree(cache);
102}
103
104/*
105 * this adds the block group to the fs_info rb tree for the block group
106 * cache
107 */
108static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
109 struct btrfs_block_group_cache *block_group)
110{
111 struct rb_node **p;
112 struct rb_node *parent = NULL;
113 struct btrfs_block_group_cache *cache;
114
115 spin_lock(&info->block_group_cache_lock);
116 p = &info->block_group_cache_tree.rb_node;
117
118 while (*p) {
119 parent = *p;
120 cache = rb_entry(parent, struct btrfs_block_group_cache,
121 cache_node);
122 if (block_group->key.objectid < cache->key.objectid) {
123 p = &(*p)->rb_left;
124 } else if (block_group->key.objectid > cache->key.objectid) {
125 p = &(*p)->rb_right;
126 } else {
127 spin_unlock(&info->block_group_cache_lock);
128 return -EEXIST;
129 }
130 }
131
132 rb_link_node(&block_group->cache_node, parent, p);
133 rb_insert_color(&block_group->cache_node,
134 &info->block_group_cache_tree);
135 spin_unlock(&info->block_group_cache_lock);
136
137 return 0;
138}
139
140/*
141 * This will return the block group at or after bytenr if contains is 0, else
142 * it will return the block group that contains the bytenr
143 */
144static struct btrfs_block_group_cache *
145block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
146 int contains)
147{
148 struct btrfs_block_group_cache *cache, *ret = NULL;
149 struct rb_node *n;
150 u64 end, start;
151
152 spin_lock(&info->block_group_cache_lock);
153 n = info->block_group_cache_tree.rb_node;
154
155 while (n) {
156 cache = rb_entry(n, struct btrfs_block_group_cache,
157 cache_node);
158 end = cache->key.objectid + cache->key.offset - 1;
159 start = cache->key.objectid;
160
161 if (bytenr < start) {
162 if (!contains && (!ret || start < ret->key.objectid))
163 ret = cache;
164 n = n->rb_left;
165 } else if (bytenr > start) {
166 if (contains && bytenr <= end) {
167 ret = cache;
168 break;
169 }
170 n = n->rb_right;
171 } else {
172 ret = cache;
173 break;
174 }
175 }
176 if (ret)
177 btrfs_get_block_group(ret);
178 spin_unlock(&info->block_group_cache_lock);
179
180 return ret;
181}
182
183static int add_excluded_extent(struct btrfs_root *root,
184 u64 start, u64 num_bytes)
185{
186 u64 end = start + num_bytes - 1;
187 set_extent_bits(&root->fs_info->freed_extents[0],
188 start, end, EXTENT_UPTODATE, GFP_NOFS);
189 set_extent_bits(&root->fs_info->freed_extents[1],
190 start, end, EXTENT_UPTODATE, GFP_NOFS);
191 return 0;
192}
193
194static void free_excluded_extents(struct btrfs_root *root,
195 struct btrfs_block_group_cache *cache)
196{
197 u64 start, end;
198
199 start = cache->key.objectid;
200 end = start + cache->key.offset - 1;
201
202 clear_extent_bits(&root->fs_info->freed_extents[0],
203 start, end, EXTENT_UPTODATE, GFP_NOFS);
204 clear_extent_bits(&root->fs_info->freed_extents[1],
205 start, end, EXTENT_UPTODATE, GFP_NOFS);
206}
207
208static int exclude_super_stripes(struct btrfs_root *root,
209 struct btrfs_block_group_cache *cache)
210{
211 u64 bytenr;
212 u64 *logical;
213 int stripe_len;
214 int i, nr, ret;
215
216 if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
217 stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
218 cache->bytes_super += stripe_len;
219 ret = add_excluded_extent(root, cache->key.objectid,
220 stripe_len);
221 BUG_ON(ret);
222 }
223
224 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
225 bytenr = btrfs_sb_offset(i);
226 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
227 cache->key.objectid, bytenr,
228 0, &logical, &nr, &stripe_len);
229 BUG_ON(ret);
230
231 while (nr--) {
232 cache->bytes_super += stripe_len;
233 ret = add_excluded_extent(root, logical[nr],
234 stripe_len);
235 BUG_ON(ret);
236 }
237
238 kfree(logical);
239 }
240 return 0;
241}
242
243static struct btrfs_caching_control *
244get_caching_control(struct btrfs_block_group_cache *cache)
245{
246 struct btrfs_caching_control *ctl;
247
248 spin_lock(&cache->lock);
249 if (cache->cached != BTRFS_CACHE_STARTED) {
250 spin_unlock(&cache->lock);
251 return NULL;
252 }
253
254 ctl = cache->caching_ctl;
255 atomic_inc(&ctl->count);
256 spin_unlock(&cache->lock);
257 return ctl;
258}
259
260static void put_caching_control(struct btrfs_caching_control *ctl)
261{
262 if (atomic_dec_and_test(&ctl->count))
263 kfree(ctl);
264}
265
266/*
267 * This is only called by cache_block_group; since we could have freed extents,
268 * we need to check the pinned_extents for any extents that can't be used yet,
269 * as their free space will be released as soon as the transaction commits.
270 */
271static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
272 struct btrfs_fs_info *info, u64 start, u64 end)
273{
274 u64 extent_start, extent_end, size, total_added = 0;
275 int ret;
276
277 while (start < end) {
278 ret = find_first_extent_bit(info->pinned_extents, start,
279 &extent_start, &extent_end,
280 EXTENT_DIRTY | EXTENT_UPTODATE);
281 if (ret)
282 break;
283
284 if (extent_start <= start) {
285 start = extent_end + 1;
286 } else if (extent_start > start && extent_start < end) {
287 size = extent_start - start;
288 total_added += size;
289 ret = btrfs_add_free_space(block_group, start,
290 size);
291 BUG_ON(ret);
292 start = extent_end + 1;
293 } else {
294 break;
295 }
296 }
297
298 if (start < end) {
299 size = end - start;
300 total_added += size;
301 ret = btrfs_add_free_space(block_group, start, size);
302 BUG_ON(ret);
303 }
304
305 return total_added;
306}
307
308static int caching_kthread(void *data)
309{
310 struct btrfs_block_group_cache *block_group = data;
311 struct btrfs_fs_info *fs_info = block_group->fs_info;
312 struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
313 struct btrfs_root *extent_root = fs_info->extent_root;
314 struct btrfs_path *path;
315 struct extent_buffer *leaf;
316 struct btrfs_key key;
317 u64 total_found = 0;
318 u64 last = 0;
319 u32 nritems;
320 int ret = 0;
321
322 path = btrfs_alloc_path();
323 if (!path)
324 return -ENOMEM;
325
326 exclude_super_stripes(extent_root, block_group);
327 spin_lock(&block_group->space_info->lock);
328 block_group->space_info->bytes_super += block_group->bytes_super;
329 spin_unlock(&block_group->space_info->lock);
330
331 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
332
333 /*
334 * We don't want to deadlock with somebody trying to allocate a new
335 * extent for the extent root while also trying to search the extent
336 * root to add free space. So we skip locking and search the commit
337 * root, since it's read-only
338 */
339 path->skip_locking = 1;
340 path->search_commit_root = 1;
341 path->reada = 2;
342
343 key.objectid = last;
344 key.offset = 0;
345 key.type = BTRFS_EXTENT_ITEM_KEY;
346again:
347 mutex_lock(&caching_ctl->mutex);
348 /* need to make sure the commit_root doesn't disappear */
349 down_read(&fs_info->extent_commit_sem);
350
351 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
352 if (ret < 0)
353 goto err;
354
355 leaf = path->nodes[0];
356 nritems = btrfs_header_nritems(leaf);
357
358 while (1) {
359 smp_mb();
360 if (fs_info->closing > 1) {
361 last = (u64)-1;
362 break;
363 }
364
365 if (path->slots[0] < nritems) {
366 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
367 } else {
368 ret = find_next_key(path, 0, &key);
369 if (ret)
370 break;
371
372 caching_ctl->progress = last;
373 btrfs_release_path(extent_root, path);
374 up_read(&fs_info->extent_commit_sem);
375 mutex_unlock(&caching_ctl->mutex);
376 if (btrfs_transaction_in_commit(fs_info))
377 schedule_timeout(1);
378 else
379 cond_resched();
380 goto again;
381 }
382
383 if (key.objectid < block_group->key.objectid) {
384 path->slots[0]++;
385 continue;
386 }
387
388 if (key.objectid >= block_group->key.objectid +
389 block_group->key.offset)
390 break;
391
392 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
393 total_found += add_new_free_space(block_group,
394 fs_info, last,
395 key.objectid);
396 last = key.objectid + key.offset;
397
398 if (total_found > (1024 * 1024 * 2)) {
399 total_found = 0;
400 wake_up(&caching_ctl->wait);
401 }
402 }
403 path->slots[0]++;
404 }
405 ret = 0;
406
407 total_found += add_new_free_space(block_group, fs_info, last,
408 block_group->key.objectid +
409 block_group->key.offset);
410 caching_ctl->progress = (u64)-1;
411
412 spin_lock(&block_group->lock);
413 block_group->caching_ctl = NULL;
414 block_group->cached = BTRFS_CACHE_FINISHED;
415 spin_unlock(&block_group->lock);
416
417err:
418 btrfs_free_path(path);
419 up_read(&fs_info->extent_commit_sem);
420
421 free_excluded_extents(extent_root, block_group);
422
423 mutex_unlock(&caching_ctl->mutex);
424 wake_up(&caching_ctl->wait);
425
426 put_caching_control(caching_ctl);
427 atomic_dec(&block_group->space_info->caching_threads);
428 btrfs_put_block_group(block_group);
429
430 return 0;
431}
432
433static int cache_block_group(struct btrfs_block_group_cache *cache)
434{
435 struct btrfs_fs_info *fs_info = cache->fs_info;
436 struct btrfs_caching_control *caching_ctl;
437 struct task_struct *tsk;
438 int ret = 0;
439
440 smp_mb();
441 if (cache->cached != BTRFS_CACHE_NO)
442 return 0;
443
444 caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
445 BUG_ON(!caching_ctl);
446
447 INIT_LIST_HEAD(&caching_ctl->list);
448 mutex_init(&caching_ctl->mutex);
449 init_waitqueue_head(&caching_ctl->wait);
450 caching_ctl->block_group = cache;
451 caching_ctl->progress = cache->key.objectid;
452 /* one for caching kthread, one for caching block group list */
453 atomic_set(&caching_ctl->count, 2);
454
455 spin_lock(&cache->lock);
456 if (cache->cached != BTRFS_CACHE_NO) {
457 spin_unlock(&cache->lock);
458 kfree(caching_ctl);
459 return 0;
460 }
461 cache->caching_ctl = caching_ctl;
462 cache->cached = BTRFS_CACHE_STARTED;
463 spin_unlock(&cache->lock);
464
465 down_write(&fs_info->extent_commit_sem);
466 list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
467 up_write(&fs_info->extent_commit_sem);
468
469 atomic_inc(&cache->space_info->caching_threads);
470 btrfs_get_block_group(cache);
471
472 tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
473 cache->key.objectid);
474 if (IS_ERR(tsk)) {
475 ret = PTR_ERR(tsk);
476 printk(KERN_ERR "error running thread %d\n", ret);
477 BUG();
478 }
479
480 return ret;
481}
482
0f9dd46c
JB
483/*
484 * return the block group that starts at or after bytenr
485 */
d397712b
CM
486static struct btrfs_block_group_cache *
487btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
0ef3e66b 488{
0f9dd46c 489 struct btrfs_block_group_cache *cache;
0ef3e66b 490
0f9dd46c 491 cache = block_group_cache_tree_search(info, bytenr, 0);
0ef3e66b 492
0f9dd46c 493 return cache;
0ef3e66b
CM
494}
495
0f9dd46c 496/*
9f55684c 497 * return the block group that contains the given bytenr
0f9dd46c 498 */
d397712b
CM
499struct btrfs_block_group_cache *btrfs_lookup_block_group(
500 struct btrfs_fs_info *info,
501 u64 bytenr)
be744175 502{
0f9dd46c 503 struct btrfs_block_group_cache *cache;
be744175 504
0f9dd46c 505 cache = block_group_cache_tree_search(info, bytenr, 1);
96b5179d 506
0f9dd46c 507 return cache;
be744175 508}
0b86a832 509
0f9dd46c
JB
510static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
511 u64 flags)
6324fbf3 512{
0f9dd46c 513 struct list_head *head = &info->space_info;
0f9dd46c 514 struct btrfs_space_info *found;
4184ea7f 515
b742bb82
YZ
516 flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
517 BTRFS_BLOCK_GROUP_METADATA;
518
4184ea7f
CM
519 rcu_read_lock();
520 list_for_each_entry_rcu(found, head, list) {
521 if (found->flags == flags) {
522 rcu_read_unlock();
0f9dd46c 523 return found;
4184ea7f 524 }
0f9dd46c 525 }
4184ea7f 526 rcu_read_unlock();
0f9dd46c 527 return NULL;
6324fbf3
CM
528}
529
4184ea7f
CM
530/*
531 * after adding space to the filesystem, we need to clear the full flags
532 * on all the space infos.
533 */
534void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
535{
536 struct list_head *head = &info->space_info;
537 struct btrfs_space_info *found;
538
539 rcu_read_lock();
540 list_for_each_entry_rcu(found, head, list)
541 found->full = 0;
542 rcu_read_unlock();
543}
544
80eb234a
JB
545static u64 div_factor(u64 num, int factor)
546{
547 if (factor == 10)
548 return num;
549 num *= factor;
550 do_div(num, 10);
551 return num;
552}
553
d2fb3437
YZ
554u64 btrfs_find_block_group(struct btrfs_root *root,
555 u64 search_start, u64 search_hint, int owner)
cd1bc465 556{
96b5179d 557 struct btrfs_block_group_cache *cache;
cd1bc465 558 u64 used;
d2fb3437
YZ
559 u64 last = max(search_hint, search_start);
560 u64 group_start = 0;
31f3c99b 561 int full_search = 0;
d2fb3437 562 int factor = 9;
0ef3e66b 563 int wrapped = 0;
31f3c99b 564again:
e8569813
ZY
565 while (1) {
566 cache = btrfs_lookup_first_block_group(root->fs_info, last);
0f9dd46c
JB
567 if (!cache)
568 break;
96b5179d 569
c286ac48 570 spin_lock(&cache->lock);
96b5179d
CM
571 last = cache->key.objectid + cache->key.offset;
572 used = btrfs_block_group_used(&cache->item);
573
d2fb3437
YZ
574 if ((full_search || !cache->ro) &&
575 block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
e8569813 576 if (used + cache->pinned + cache->reserved <
d2fb3437
YZ
577 div_factor(cache->key.offset, factor)) {
578 group_start = cache->key.objectid;
c286ac48 579 spin_unlock(&cache->lock);
fa9c0d79 580 btrfs_put_block_group(cache);
8790d502
CM
581 goto found;
582 }
6324fbf3 583 }
c286ac48 584 spin_unlock(&cache->lock);
fa9c0d79 585 btrfs_put_block_group(cache);
de428b63 586 cond_resched();
cd1bc465 587 }
0ef3e66b
CM
588 if (!wrapped) {
589 last = search_start;
590 wrapped = 1;
591 goto again;
592 }
593 if (!full_search && factor < 10) {
be744175 594 last = search_start;
31f3c99b 595 full_search = 1;
0ef3e66b 596 factor = 10;
31f3c99b
CM
597 goto again;
598 }
be744175 599found:
d2fb3437 600 return group_start;
925baedd 601}
0f9dd46c 602
e02119d5 603/* simple helper to search for an existing extent at a given offset */
31840ae1 604int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
e02119d5
CM
605{
606 int ret;
607 struct btrfs_key key;
31840ae1 608 struct btrfs_path *path;
e02119d5 609
31840ae1
ZY
610 path = btrfs_alloc_path();
611 BUG_ON(!path);
e02119d5
CM
612 key.objectid = start;
613 key.offset = len;
614 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
615 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
616 0, 0);
31840ae1 617 btrfs_free_path(path);
7bb86316
CM
618 return ret;
619}
620
621/*
622 * Back reference rules. Back refs have three main goals:
623 *
624 * 1) differentiate between all holders of references to an extent so that
625 * when a reference is dropped we can make sure it was a valid reference
626 * before freeing the extent.
627 *
628 * 2) Provide enough information to quickly find the holders of an extent
629 * if we notice a given block is corrupted or bad.
630 *
631 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
632 * maintenance. This is actually the same as #2, but with a slightly
633 * different use case.
634 *
635 * There are two kinds of back refs. The implicit back refs is optimized
636 * for pointers in non-shared tree blocks. For a given pointer in a block,
637 * back refs of this kind provide information about the block's owner tree
638 * and the pointer's key. This information allows us to find the block by
639 * b-tree searching. The full back refs is for pointers in tree blocks not
640 * referenced by their owner trees. The location of tree block is recorded
641 * in the back refs. Actually the full back refs is generic, and can be
642 * used in all cases the implicit back refs is used. The major shortcoming
643 * of the full back refs is its overhead. Every time a tree block gets
644 * COWed, we have to update back refs entry for all pointers in it.
645 *
646 * For a newly allocated tree block, we use implicit back refs for
647 * pointers in it. This means most tree related operations only involve
648 * implicit back refs. For a tree block created in old transaction, the
649 * only way to drop a reference to it is to COW it. So we can detect the
650 * event that tree block loses its owner tree's reference and do the
651 * back refs conversion.
652 *
653 * When a tree block is COW'd through a tree, there are four cases:
654 *
655 * The reference count of the block is one and the tree is the block's
656 * owner tree. Nothing to do in this case.
657 *
658 * The reference count of the block is one and the tree is not the
659 * block's owner tree. In this case, full back refs is used for pointers
660 * in the block. Remove these full back refs, add implicit back refs for
661 * every pointer in the new block.
662 *
663 * The reference count of the block is greater than one and the tree is
664 * the block's owner tree. In this case, implicit back refs is used for
665 * pointers in the block. Add full back refs for every pointer in the
666 * block, increase lower level extents' reference counts. The original
667 * implicit back refs are carried over to the new block.
668 *
669 * The reference count of the block is greater than one and the tree is
670 * not the block's owner tree. Add implicit back refs for every pointer in
671 * the new block, increase lower level extents' reference count.
672 *
673 * Back Reference Key composing:
674 *
675 * The key objectid corresponds to the first byte in the extent,
676 * The key type is used to differentiate between types of back refs.
677 * There are different meanings of the key offset for different types
678 * of back refs.
679 *
680 * File extents can be referenced by:
681 *
682 * - multiple snapshots, subvolumes, or different generations in one subvol
683 * - different files inside a single subvolume
684 * - different offsets inside a file (bookend extents in file.c)
685 *
686 * The extent ref structure for the implicit back refs has fields for:
687 *
688 * - Objectid of the subvolume root
689 * - objectid of the file holding the reference
690 * - original offset in the file
691 * - how many bookend extents
692 *
693 * The key offset for the implicit back refs is hash of the first
694 * three fields.
695 *
696 * The extent ref structure for the full back refs has a field for:
697 *
698 * - number of pointers in the tree leaf
699 *
700 * The key offset for the full back refs is the first byte of
701 * the tree leaf
702 *
703 * When a file extent is allocated, the implicit back refs is used.
704 * The fields are filled in:
705 *
706 * (root_key.objectid, inode objectid, offset in file, 1)
707 *
708 * When a file extent is removed during file truncation, we find the
709 * corresponding implicit back refs and check the following fields:
710 *
711 * (btrfs_header_owner(leaf), inode objectid, offset in file)
712 *
713 * Btree extents can be referenced by:
714 *
715 * - Different subvolumes
716 *
717 * Both the implicit back refs and the full back refs for tree blocks
718 * only consist of a key. The key offset for the implicit back refs is
719 * objectid of block's owner tree. The key offset for the full back refs
720 * is the first byte of parent block.
721 *
722 * When implicit back refs is used, information about the lowest key and
723 * level of the tree block are required. This information is stored in
724 * the tree block info structure.
725 */
726
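/*
 * A concrete illustration of the key forms described above (B, I, O and R
 * are placeholder values used only for illustration, not taken from this
 * file).  For a data extent at bytenr B referenced by inode I at file
 * offset O in the tree with root objectid R, the back ref items are keyed:
 *
 *   implicit:  (B, BTRFS_EXTENT_DATA_REF_KEY, hash(R, I, O))
 *   full:      (B, BTRFS_SHARED_DATA_REF_KEY, bytenr of the metadata leaf
 *               that holds the file extent item)
 *
 * Tree blocks follow the same pattern with BTRFS_TREE_BLOCK_REF_KEY
 * (offset = owner tree objectid) and BTRFS_SHARED_BLOCK_REF_KEY
 * (offset = parent block bytenr); see extent_ref_type() below.
 */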
727#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
728static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
729 struct btrfs_root *root,
730 struct btrfs_path *path,
731 u64 owner, u32 extra_size)
7bb86316 732{
5d4f98a2
YZ
733 struct btrfs_extent_item *item;
734 struct btrfs_extent_item_v0 *ei0;
735 struct btrfs_extent_ref_v0 *ref0;
736 struct btrfs_tree_block_info *bi;
737 struct extent_buffer *leaf;
7bb86316 738 struct btrfs_key key;
5d4f98a2
YZ
739 struct btrfs_key found_key;
740 u32 new_size = sizeof(*item);
741 u64 refs;
742 int ret;
743
744 leaf = path->nodes[0];
745 BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));
746
747 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
748 ei0 = btrfs_item_ptr(leaf, path->slots[0],
749 struct btrfs_extent_item_v0);
750 refs = btrfs_extent_refs_v0(leaf, ei0);
751
752 if (owner == (u64)-1) {
753 while (1) {
754 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
755 ret = btrfs_next_leaf(root, path);
756 if (ret < 0)
757 return ret;
758 BUG_ON(ret > 0);
759 leaf = path->nodes[0];
760 }
761 btrfs_item_key_to_cpu(leaf, &found_key,
762 path->slots[0]);
763 BUG_ON(key.objectid != found_key.objectid);
764 if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
765 path->slots[0]++;
766 continue;
767 }
768 ref0 = btrfs_item_ptr(leaf, path->slots[0],
769 struct btrfs_extent_ref_v0);
770 owner = btrfs_ref_objectid_v0(leaf, ref0);
771 break;
772 }
773 }
774 btrfs_release_path(root, path);
775
776 if (owner < BTRFS_FIRST_FREE_OBJECTID)
777 new_size += sizeof(*bi);
778
779 new_size -= sizeof(*ei0);
780 ret = btrfs_search_slot(trans, root, &key, path,
781 new_size + extra_size, 1);
782 if (ret < 0)
783 return ret;
784 BUG_ON(ret);
785
786 ret = btrfs_extend_item(trans, root, path, new_size);
787 BUG_ON(ret);
788
789 leaf = path->nodes[0];
790 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
791 btrfs_set_extent_refs(leaf, item, refs);
792 /* FIXME: get real generation */
793 btrfs_set_extent_generation(leaf, item, 0);
794 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
795 btrfs_set_extent_flags(leaf, item,
796 BTRFS_EXTENT_FLAG_TREE_BLOCK |
797 BTRFS_BLOCK_FLAG_FULL_BACKREF);
798 bi = (struct btrfs_tree_block_info *)(item + 1);
799 /* FIXME: get first key of the block */
800 memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
801 btrfs_set_tree_block_level(leaf, bi, (int)owner);
802 } else {
803 btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
804 }
805 btrfs_mark_buffer_dirty(leaf);
806 return 0;
807}
808#endif
809
810static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
811{
812 u32 high_crc = ~(u32)0;
813 u32 low_crc = ~(u32)0;
814 __le64 lenum;
815
816 lenum = cpu_to_le64(root_objectid);
817 high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
818 lenum = cpu_to_le64(owner);
819 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
820 lenum = cpu_to_le64(offset);
821 low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
822
823 return ((u64)high_crc << 31) ^ (u64)low_crc;
824}
825
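/*
 * A minimal sketch of how the hash above is consumed when composing back
 * ref keys.  The helper below is hypothetical and purely illustrative;
 * lookup_extent_data_ref() and insert_extent_data_ref() further down
 * open-code the same assignments.
 */
static inline void example_extent_data_ref_key(struct btrfs_key *key,
					       u64 bytenr, u64 parent,
					       u64 root_objectid,
					       u64 owner, u64 offset)
{
	key->objectid = bytenr;			/* first byte of the extent */
	if (parent) {
		/* full/shared back ref keys off the referencing leaf */
		key->type = BTRFS_SHARED_DATA_REF_KEY;
		key->offset = parent;
	} else {
		/* implicit back ref keys off hash of (root, inode, offset) */
		key->type = BTRFS_EXTENT_DATA_REF_KEY;
		key->offset = hash_extent_data_ref(root_objectid, owner,
						   offset);
	}
}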
826static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
827 struct btrfs_extent_data_ref *ref)
828{
829 return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
830 btrfs_extent_data_ref_objectid(leaf, ref),
831 btrfs_extent_data_ref_offset(leaf, ref));
832}
833
834static int match_extent_data_ref(struct extent_buffer *leaf,
835 struct btrfs_extent_data_ref *ref,
836 u64 root_objectid, u64 owner, u64 offset)
837{
838 if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
839 btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
840 btrfs_extent_data_ref_offset(leaf, ref) != offset)
841 return 0;
842 return 1;
843}
844
845static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
846 struct btrfs_root *root,
847 struct btrfs_path *path,
848 u64 bytenr, u64 parent,
849 u64 root_objectid,
850 u64 owner, u64 offset)
851{
852 struct btrfs_key key;
853 struct btrfs_extent_data_ref *ref;
31840ae1 854 struct extent_buffer *leaf;
5d4f98a2 855 u32 nritems;
74493f7a 856 int ret;
5d4f98a2
YZ
857 int recow;
858 int err = -ENOENT;
74493f7a 859
31840ae1 860 key.objectid = bytenr;
5d4f98a2
YZ
861 if (parent) {
862 key.type = BTRFS_SHARED_DATA_REF_KEY;
863 key.offset = parent;
864 } else {
865 key.type = BTRFS_EXTENT_DATA_REF_KEY;
866 key.offset = hash_extent_data_ref(root_objectid,
867 owner, offset);
868 }
869again:
870 recow = 0;
871 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
872 if (ret < 0) {
873 err = ret;
874 goto fail;
875 }
31840ae1 876
5d4f98a2
YZ
877 if (parent) {
878 if (!ret)
879 return 0;
880#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
881 key.type = BTRFS_EXTENT_REF_V0_KEY;
882 btrfs_release_path(root, path);
883 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
884 if (ret < 0) {
885 err = ret;
886 goto fail;
887 }
888 if (!ret)
889 return 0;
890#endif
891 goto fail;
31840ae1
ZY
892 }
893
894 leaf = path->nodes[0];
5d4f98a2
YZ
895 nritems = btrfs_header_nritems(leaf);
896 while (1) {
897 if (path->slots[0] >= nritems) {
898 ret = btrfs_next_leaf(root, path);
899 if (ret < 0)
900 err = ret;
901 if (ret)
902 goto fail;
903
904 leaf = path->nodes[0];
905 nritems = btrfs_header_nritems(leaf);
906 recow = 1;
907 }
908
909 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
910 if (key.objectid != bytenr ||
911 key.type != BTRFS_EXTENT_DATA_REF_KEY)
912 goto fail;
913
914 ref = btrfs_item_ptr(leaf, path->slots[0],
915 struct btrfs_extent_data_ref);
916
917 if (match_extent_data_ref(leaf, ref, root_objectid,
918 owner, offset)) {
919 if (recow) {
920 btrfs_release_path(root, path);
921 goto again;
922 }
923 err = 0;
924 break;
925 }
926 path->slots[0]++;
31840ae1 927 }
5d4f98a2
YZ
928fail:
929 return err;
31840ae1
ZY
930}
931
5d4f98a2
YZ
932static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
933 struct btrfs_root *root,
934 struct btrfs_path *path,
935 u64 bytenr, u64 parent,
936 u64 root_objectid, u64 owner,
937 u64 offset, int refs_to_add)
31840ae1
ZY
938{
939 struct btrfs_key key;
940 struct extent_buffer *leaf;
5d4f98a2 941 u32 size;
31840ae1
ZY
942 u32 num_refs;
943 int ret;
74493f7a 944
74493f7a 945 key.objectid = bytenr;
5d4f98a2
YZ
946 if (parent) {
947 key.type = BTRFS_SHARED_DATA_REF_KEY;
948 key.offset = parent;
949 size = sizeof(struct btrfs_shared_data_ref);
950 } else {
951 key.type = BTRFS_EXTENT_DATA_REF_KEY;
952 key.offset = hash_extent_data_ref(root_objectid,
953 owner, offset);
954 size = sizeof(struct btrfs_extent_data_ref);
955 }
74493f7a 956
5d4f98a2
YZ
957 ret = btrfs_insert_empty_item(trans, root, path, &key, size);
958 if (ret && ret != -EEXIST)
959 goto fail;
960
961 leaf = path->nodes[0];
962 if (parent) {
963 struct btrfs_shared_data_ref *ref;
31840ae1 964 ref = btrfs_item_ptr(leaf, path->slots[0],
5d4f98a2
YZ
965 struct btrfs_shared_data_ref);
966 if (ret == 0) {
967 btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
968 } else {
969 num_refs = btrfs_shared_data_ref_count(leaf, ref);
970 num_refs += refs_to_add;
971 btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
31840ae1 972 }
5d4f98a2
YZ
973 } else {
974 struct btrfs_extent_data_ref *ref;
975 while (ret == -EEXIST) {
976 ref = btrfs_item_ptr(leaf, path->slots[0],
977 struct btrfs_extent_data_ref);
978 if (match_extent_data_ref(leaf, ref, root_objectid,
979 owner, offset))
980 break;
981 btrfs_release_path(root, path);
982 key.offset++;
983 ret = btrfs_insert_empty_item(trans, root, path, &key,
984 size);
985 if (ret && ret != -EEXIST)
986 goto fail;
31840ae1 987
5d4f98a2
YZ
988 leaf = path->nodes[0];
989 }
990 ref = btrfs_item_ptr(leaf, path->slots[0],
991 struct btrfs_extent_data_ref);
992 if (ret == 0) {
993 btrfs_set_extent_data_ref_root(leaf, ref,
994 root_objectid);
995 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
996 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
997 btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
998 } else {
999 num_refs = btrfs_extent_data_ref_count(leaf, ref);
1000 num_refs += refs_to_add;
1001 btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
31840ae1 1002 }
31840ae1 1003 }
5d4f98a2
YZ
1004 btrfs_mark_buffer_dirty(leaf);
1005 ret = 0;
1006fail:
7bb86316
CM
1007 btrfs_release_path(root, path);
1008 return ret;
74493f7a
CM
1009}
1010
5d4f98a2
YZ
1011static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
1012 struct btrfs_root *root,
1013 struct btrfs_path *path,
1014 int refs_to_drop)
31840ae1 1015{
5d4f98a2
YZ
1016 struct btrfs_key key;
1017 struct btrfs_extent_data_ref *ref1 = NULL;
1018 struct btrfs_shared_data_ref *ref2 = NULL;
31840ae1 1019 struct extent_buffer *leaf;
5d4f98a2 1020 u32 num_refs = 0;
31840ae1
ZY
1021 int ret = 0;
1022
1023 leaf = path->nodes[0];
5d4f98a2
YZ
1024 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1025
1026 if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1027 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1028 struct btrfs_extent_data_ref);
1029 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1030 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1031 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1032 struct btrfs_shared_data_ref);
1033 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1034#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1035 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1036 struct btrfs_extent_ref_v0 *ref0;
1037 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1038 struct btrfs_extent_ref_v0);
1039 num_refs = btrfs_ref_count_v0(leaf, ref0);
1040#endif
1041 } else {
1042 BUG();
1043 }
1044
56bec294
CM
1045 BUG_ON(num_refs < refs_to_drop);
1046 num_refs -= refs_to_drop;
5d4f98a2 1047
31840ae1
ZY
1048 if (num_refs == 0) {
1049 ret = btrfs_del_item(trans, root, path);
1050 } else {
5d4f98a2
YZ
1051 if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
1052 btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
1053 else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
1054 btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
1055#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1056 else {
1057 struct btrfs_extent_ref_v0 *ref0;
1058 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1059 struct btrfs_extent_ref_v0);
1060 btrfs_set_ref_count_v0(leaf, ref0, num_refs);
1061 }
1062#endif
31840ae1
ZY
1063 btrfs_mark_buffer_dirty(leaf);
1064 }
31840ae1
ZY
1065 return ret;
1066}
1067
5d4f98a2
YZ
1068static noinline u32 extent_data_ref_count(struct btrfs_root *root,
1069 struct btrfs_path *path,
1070 struct btrfs_extent_inline_ref *iref)
15916de8 1071{
5d4f98a2
YZ
1072 struct btrfs_key key;
1073 struct extent_buffer *leaf;
1074 struct btrfs_extent_data_ref *ref1;
1075 struct btrfs_shared_data_ref *ref2;
1076 u32 num_refs = 0;
1077
1078 leaf = path->nodes[0];
1079 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1080 if (iref) {
1081 if (btrfs_extent_inline_ref_type(leaf, iref) ==
1082 BTRFS_EXTENT_DATA_REF_KEY) {
1083 ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
1084 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1085 } else {
1086 ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
1087 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1088 }
1089 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
1090 ref1 = btrfs_item_ptr(leaf, path->slots[0],
1091 struct btrfs_extent_data_ref);
1092 num_refs = btrfs_extent_data_ref_count(leaf, ref1);
1093 } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
1094 ref2 = btrfs_item_ptr(leaf, path->slots[0],
1095 struct btrfs_shared_data_ref);
1096 num_refs = btrfs_shared_data_ref_count(leaf, ref2);
1097#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1098 } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
1099 struct btrfs_extent_ref_v0 *ref0;
1100 ref0 = btrfs_item_ptr(leaf, path->slots[0],
1101 struct btrfs_extent_ref_v0);
1102 num_refs = btrfs_ref_count_v0(leaf, ref0);
4b4e25f2 1103#endif
5d4f98a2
YZ
1104 } else {
1105 WARN_ON(1);
1106 }
1107 return num_refs;
1108}
15916de8 1109
5d4f98a2
YZ
1110static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
1111 struct btrfs_root *root,
1112 struct btrfs_path *path,
1113 u64 bytenr, u64 parent,
1114 u64 root_objectid)
1f3c79a2 1115{
5d4f98a2 1116 struct btrfs_key key;
1f3c79a2 1117 int ret;
1f3c79a2 1118
5d4f98a2
YZ
1119 key.objectid = bytenr;
1120 if (parent) {
1121 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1122 key.offset = parent;
1123 } else {
1124 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1125 key.offset = root_objectid;
1f3c79a2
LH
1126 }
1127
5d4f98a2
YZ
1128 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1129 if (ret > 0)
1130 ret = -ENOENT;
1131#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1132 if (ret == -ENOENT && parent) {
1133 btrfs_release_path(root, path);
1134 key.type = BTRFS_EXTENT_REF_V0_KEY;
1135 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1136 if (ret > 0)
1137 ret = -ENOENT;
1138 }
1f3c79a2 1139#endif
5d4f98a2 1140 return ret;
1f3c79a2
LH
1141}
1142
5d4f98a2
YZ
1143static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
1144 struct btrfs_root *root,
1145 struct btrfs_path *path,
1146 u64 bytenr, u64 parent,
1147 u64 root_objectid)
31840ae1 1148{
5d4f98a2 1149 struct btrfs_key key;
31840ae1 1150 int ret;
31840ae1 1151
5d4f98a2
YZ
1152 key.objectid = bytenr;
1153 if (parent) {
1154 key.type = BTRFS_SHARED_BLOCK_REF_KEY;
1155 key.offset = parent;
1156 } else {
1157 key.type = BTRFS_TREE_BLOCK_REF_KEY;
1158 key.offset = root_objectid;
1159 }
1160
1161 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1162 btrfs_release_path(root, path);
31840ae1
ZY
1163 return ret;
1164}
1165
5d4f98a2 1166static inline int extent_ref_type(u64 parent, u64 owner)
31840ae1 1167{
5d4f98a2
YZ
1168 int type;
1169 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1170 if (parent > 0)
1171 type = BTRFS_SHARED_BLOCK_REF_KEY;
1172 else
1173 type = BTRFS_TREE_BLOCK_REF_KEY;
1174 } else {
1175 if (parent > 0)
1176 type = BTRFS_SHARED_DATA_REF_KEY;
1177 else
1178 type = BTRFS_EXTENT_DATA_REF_KEY;
1179 }
1180 return type;
31840ae1 1181}
56bec294 1182
2c47e605
YZ
1183static int find_next_key(struct btrfs_path *path, int level,
1184 struct btrfs_key *key)
56bec294 1185
02217ed2 1186{
2c47e605 1187 for (; level < BTRFS_MAX_LEVEL; level++) {
5d4f98a2
YZ
1188 if (!path->nodes[level])
1189 break;
5d4f98a2
YZ
1190 if (path->slots[level] + 1 >=
1191 btrfs_header_nritems(path->nodes[level]))
1192 continue;
1193 if (level == 0)
1194 btrfs_item_key_to_cpu(path->nodes[level], key,
1195 path->slots[level] + 1);
1196 else
1197 btrfs_node_key_to_cpu(path->nodes[level], key,
1198 path->slots[level] + 1);
1199 return 0;
1200 }
1201 return 1;
1202}
037e6390 1203
5d4f98a2
YZ
1204/*
1205 * look for inline back ref. if back ref is found, *ref_ret is set
1206 * to the address of inline back ref, and 0 is returned.
1207 *
1208 * if back ref isn't found, *ref_ret is set to the address where it
1209 * should be inserted, and -ENOENT is returned.
1210 *
1211 * if insert is true and there are too many inline back refs, the path
1212 * points to the extent item, and -EAGAIN is returned.
1213 *
1214 * NOTE: inline back refs are ordered in the same way that back ref
1215 * items in the tree are ordered.
1216 */
1217static noinline_for_stack
1218int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
1219 struct btrfs_root *root,
1220 struct btrfs_path *path,
1221 struct btrfs_extent_inline_ref **ref_ret,
1222 u64 bytenr, u64 num_bytes,
1223 u64 parent, u64 root_objectid,
1224 u64 owner, u64 offset, int insert)
1225{
1226 struct btrfs_key key;
1227 struct extent_buffer *leaf;
1228 struct btrfs_extent_item *ei;
1229 struct btrfs_extent_inline_ref *iref;
1230 u64 flags;
1231 u64 item_size;
1232 unsigned long ptr;
1233 unsigned long end;
1234 int extra_size;
1235 int type;
1236 int want;
1237 int ret;
1238 int err = 0;
26b8003f 1239
db94535d 1240 key.objectid = bytenr;
31840ae1 1241 key.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 1242 key.offset = num_bytes;
31840ae1 1243
5d4f98a2
YZ
1244 want = extent_ref_type(parent, owner);
1245 if (insert) {
1246 extra_size = btrfs_extent_inline_ref_size(want);
85d4198e 1247 path->keep_locks = 1;
5d4f98a2
YZ
1248 } else
1249 extra_size = -1;
1250 ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
b9473439 1251 if (ret < 0) {
5d4f98a2
YZ
1252 err = ret;
1253 goto out;
1254 }
1255 BUG_ON(ret);
1256
1257 leaf = path->nodes[0];
1258 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1259#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1260 if (item_size < sizeof(*ei)) {
1261 if (!insert) {
1262 err = -ENOENT;
1263 goto out;
1264 }
1265 ret = convert_extent_item_v0(trans, root, path, owner,
1266 extra_size);
1267 if (ret < 0) {
1268 err = ret;
1269 goto out;
1270 }
1271 leaf = path->nodes[0];
1272 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1273 }
1274#endif
1275 BUG_ON(item_size < sizeof(*ei));
1276
5d4f98a2
YZ
1277 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1278 flags = btrfs_extent_flags(leaf, ei);
1279
1280 ptr = (unsigned long)(ei + 1);
1281 end = (unsigned long)ei + item_size;
1282
1283 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1284 ptr += sizeof(struct btrfs_tree_block_info);
1285 BUG_ON(ptr > end);
1286 } else {
1287 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1288 }
1289
1290 err = -ENOENT;
1291 while (1) {
1292 if (ptr >= end) {
1293 WARN_ON(ptr > end);
1294 break;
1295 }
1296 iref = (struct btrfs_extent_inline_ref *)ptr;
1297 type = btrfs_extent_inline_ref_type(leaf, iref);
1298 if (want < type)
1299 break;
1300 if (want > type) {
1301 ptr += btrfs_extent_inline_ref_size(type);
1302 continue;
1303 }
1304
1305 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1306 struct btrfs_extent_data_ref *dref;
1307 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1308 if (match_extent_data_ref(leaf, dref, root_objectid,
1309 owner, offset)) {
1310 err = 0;
1311 break;
1312 }
1313 if (hash_extent_data_ref_item(leaf, dref) <
1314 hash_extent_data_ref(root_objectid, owner, offset))
1315 break;
1316 } else {
1317 u64 ref_offset;
1318 ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1319 if (parent > 0) {
1320 if (parent == ref_offset) {
1321 err = 0;
1322 break;
1323 }
1324 if (ref_offset < parent)
1325 break;
1326 } else {
1327 if (root_objectid == ref_offset) {
1328 err = 0;
1329 break;
1330 }
1331 if (ref_offset < root_objectid)
1332 break;
1333 }
1334 }
1335 ptr += btrfs_extent_inline_ref_size(type);
1336 }
1337 if (err == -ENOENT && insert) {
1338 if (item_size + extra_size >=
1339 BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
1340 err = -EAGAIN;
1341 goto out;
1342 }
1343 /*
1344 * To add new inline back ref, we have to make sure
1345 * there is no corresponding back ref item.
1346 * For simplicity, we just do not add new inline back
1347 * ref if there is any kind of item for this block
1348 */
2c47e605
YZ
1349 if (find_next_key(path, 0, &key) == 0 &&
1350 key.objectid == bytenr &&
85d4198e 1351 key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
5d4f98a2
YZ
1352 err = -EAGAIN;
1353 goto out;
1354 }
1355 }
1356 *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1357out:
85d4198e 1358 if (insert) {
5d4f98a2
YZ
1359 path->keep_locks = 0;
1360 btrfs_unlock_up_safe(path, 1);
1361 }
1362 return err;
1363}
1364
1365/*
1366 * helper to add new inline back ref
1367 */
1368static noinline_for_stack
1369int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1370 struct btrfs_root *root,
1371 struct btrfs_path *path,
1372 struct btrfs_extent_inline_ref *iref,
1373 u64 parent, u64 root_objectid,
1374 u64 owner, u64 offset, int refs_to_add,
1375 struct btrfs_delayed_extent_op *extent_op)
1376{
1377 struct extent_buffer *leaf;
1378 struct btrfs_extent_item *ei;
1379 unsigned long ptr;
1380 unsigned long end;
1381 unsigned long item_offset;
1382 u64 refs;
1383 int size;
1384 int type;
1385 int ret;
1386
1387 leaf = path->nodes[0];
1388 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1389 item_offset = (unsigned long)iref - (unsigned long)ei;
1390
1391 type = extent_ref_type(parent, owner);
1392 size = btrfs_extent_inline_ref_size(type);
1393
1394 ret = btrfs_extend_item(trans, root, path, size);
1395 BUG_ON(ret);
1396
1397 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1398 refs = btrfs_extent_refs(leaf, ei);
1399 refs += refs_to_add;
1400 btrfs_set_extent_refs(leaf, ei, refs);
1401 if (extent_op)
1402 __run_delayed_extent_op(extent_op, leaf, ei);
1403
1404 ptr = (unsigned long)ei + item_offset;
1405 end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1406 if (ptr < end - size)
1407 memmove_extent_buffer(leaf, ptr + size, ptr,
1408 end - size - ptr);
1409
1410 iref = (struct btrfs_extent_inline_ref *)ptr;
1411 btrfs_set_extent_inline_ref_type(leaf, iref, type);
1412 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1413 struct btrfs_extent_data_ref *dref;
1414 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1415 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1416 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1417 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1418 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1419 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1420 struct btrfs_shared_data_ref *sref;
1421 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1422 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1423 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1424 } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1425 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1426 } else {
1427 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1428 }
1429 btrfs_mark_buffer_dirty(leaf);
1430 return 0;
1431}
1432
1433static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1434 struct btrfs_root *root,
1435 struct btrfs_path *path,
1436 struct btrfs_extent_inline_ref **ref_ret,
1437 u64 bytenr, u64 num_bytes, u64 parent,
1438 u64 root_objectid, u64 owner, u64 offset)
1439{
1440 int ret;
1441
1442 ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1443 bytenr, num_bytes, parent,
1444 root_objectid, owner, offset, 0);
1445 if (ret != -ENOENT)
54aa1f4d 1446 return ret;
5d4f98a2
YZ
1447
1448 btrfs_release_path(root, path);
1449 *ref_ret = NULL;
1450
1451 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1452 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1453 root_objectid);
1454 } else {
1455 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1456 root_objectid, owner, offset);
b9473439 1457 }
5d4f98a2
YZ
1458 return ret;
1459}
31840ae1 1460
5d4f98a2
YZ
1461/*
1462 * helper to update/remove inline back ref
1463 */
1464static noinline_for_stack
1465int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1466 struct btrfs_root *root,
1467 struct btrfs_path *path,
1468 struct btrfs_extent_inline_ref *iref,
1469 int refs_to_mod,
1470 struct btrfs_delayed_extent_op *extent_op)
1471{
1472 struct extent_buffer *leaf;
1473 struct btrfs_extent_item *ei;
1474 struct btrfs_extent_data_ref *dref = NULL;
1475 struct btrfs_shared_data_ref *sref = NULL;
1476 unsigned long ptr;
1477 unsigned long end;
1478 u32 item_size;
1479 int size;
1480 int type;
1481 int ret;
1482 u64 refs;
1483
1484 leaf = path->nodes[0];
1485 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1486 refs = btrfs_extent_refs(leaf, ei);
1487 WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1488 refs += refs_to_mod;
1489 btrfs_set_extent_refs(leaf, ei, refs);
1490 if (extent_op)
1491 __run_delayed_extent_op(extent_op, leaf, ei);
1492
1493 type = btrfs_extent_inline_ref_type(leaf, iref);
1494
1495 if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1496 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1497 refs = btrfs_extent_data_ref_count(leaf, dref);
1498 } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1499 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1500 refs = btrfs_shared_data_ref_count(leaf, sref);
1501 } else {
1502 refs = 1;
1503 BUG_ON(refs_to_mod != -1);
56bec294 1504 }
31840ae1 1505
5d4f98a2
YZ
1506 BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1507 refs += refs_to_mod;
1508
1509 if (refs > 0) {
1510 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1511 btrfs_set_extent_data_ref_count(leaf, dref, refs);
1512 else
1513 btrfs_set_shared_data_ref_count(leaf, sref, refs);
1514 } else {
1515 size = btrfs_extent_inline_ref_size(type);
1516 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1517 ptr = (unsigned long)iref;
1518 end = (unsigned long)ei + item_size;
1519 if (ptr + size < end)
1520 memmove_extent_buffer(leaf, ptr, ptr + size,
1521 end - ptr - size);
1522 item_size -= size;
1523 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1524 BUG_ON(ret);
1525 }
1526 btrfs_mark_buffer_dirty(leaf);
1527 return 0;
1528}
1529
1530static noinline_for_stack
1531int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1532 struct btrfs_root *root,
1533 struct btrfs_path *path,
1534 u64 bytenr, u64 num_bytes, u64 parent,
1535 u64 root_objectid, u64 owner,
1536 u64 offset, int refs_to_add,
1537 struct btrfs_delayed_extent_op *extent_op)
1538{
1539 struct btrfs_extent_inline_ref *iref;
1540 int ret;
1541
1542 ret = lookup_inline_extent_backref(trans, root, path, &iref,
1543 bytenr, num_bytes, parent,
1544 root_objectid, owner, offset, 1);
1545 if (ret == 0) {
1546 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1547 ret = update_inline_extent_backref(trans, root, path, iref,
1548 refs_to_add, extent_op);
1549 } else if (ret == -ENOENT) {
1550 ret = setup_inline_extent_backref(trans, root, path, iref,
1551 parent, root_objectid,
1552 owner, offset, refs_to_add,
1553 extent_op);
771ed689 1554 }
5d4f98a2
YZ
1555 return ret;
1556}
31840ae1 1557
5d4f98a2
YZ
1558static int insert_extent_backref(struct btrfs_trans_handle *trans,
1559 struct btrfs_root *root,
1560 struct btrfs_path *path,
1561 u64 bytenr, u64 parent, u64 root_objectid,
1562 u64 owner, u64 offset, int refs_to_add)
1563{
1564 int ret;
1565 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1566 BUG_ON(refs_to_add != 1);
1567 ret = insert_tree_block_ref(trans, root, path, bytenr,
1568 parent, root_objectid);
1569 } else {
1570 ret = insert_extent_data_ref(trans, root, path, bytenr,
1571 parent, root_objectid,
1572 owner, offset, refs_to_add);
1573 }
1574 return ret;
1575}
56bec294 1576
5d4f98a2
YZ
1577static int remove_extent_backref(struct btrfs_trans_handle *trans,
1578 struct btrfs_root *root,
1579 struct btrfs_path *path,
1580 struct btrfs_extent_inline_ref *iref,
1581 int refs_to_drop, int is_data)
1582{
1583 int ret;
b9473439 1584
5d4f98a2
YZ
1585 BUG_ON(!is_data && refs_to_drop != 1);
1586 if (iref) {
1587 ret = update_inline_extent_backref(trans, root, path, iref,
1588 -refs_to_drop, NULL);
1589 } else if (is_data) {
1590 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1591 } else {
1592 ret = btrfs_del_item(trans, root, path);
1593 }
1594 return ret;
1595}
1596
5d4f98a2
YZ
1597static void btrfs_issue_discard(struct block_device *bdev,
1598 u64 start, u64 len)
1599{
746cd1e7
CH
1600 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
1601 DISCARD_FL_BARRIER);
5d4f98a2 1602}
5d4f98a2
YZ
1603
1604static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1605 u64 num_bytes)
1606{
5d4f98a2
YZ
1607 int ret;
1608 u64 map_length = num_bytes;
1609 struct btrfs_multi_bio *multi = NULL;
1610
e244a0ae
CH
1611 if (!btrfs_test_opt(root, DISCARD))
1612 return 0;
1613
5d4f98a2
YZ
1614 /* Tell the block device(s) that the sectors can be discarded */
1615 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1616 bytenr, &map_length, &multi, 0);
1617 if (!ret) {
1618 struct btrfs_bio_stripe *stripe = multi->stripes;
1619 int i;
1620
1621 if (map_length > num_bytes)
1622 map_length = num_bytes;
1623
1624 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1625 btrfs_issue_discard(stripe->dev->bdev,
1626 stripe->physical,
1627 map_length);
1628 }
1629 kfree(multi);
1630 }
1631
1632 return ret;
5d4f98a2
YZ
1633}
1634
1635int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1636 struct btrfs_root *root,
1637 u64 bytenr, u64 num_bytes, u64 parent,
1638 u64 root_objectid, u64 owner, u64 offset)
1639{
1640 int ret;
1641 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1642 root_objectid == BTRFS_TREE_LOG_OBJECTID);
1643
1644 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1645 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1646 parent, root_objectid, (int)owner,
1647 BTRFS_ADD_DELAYED_REF, NULL);
1648 } else {
1649 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1650 parent, root_objectid, owner, offset,
1651 BTRFS_ADD_DELAYED_REF, NULL);
1652 }
1653 return ret;
1654}
1655
1656static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1657 struct btrfs_root *root,
1658 u64 bytenr, u64 num_bytes,
1659 u64 parent, u64 root_objectid,
1660 u64 owner, u64 offset, int refs_to_add,
1661 struct btrfs_delayed_extent_op *extent_op)
1662{
1663 struct btrfs_path *path;
1664 struct extent_buffer *leaf;
1665 struct btrfs_extent_item *item;
1666 u64 refs;
1667 int ret;
1668 int err = 0;
1669
1670 path = btrfs_alloc_path();
1671 if (!path)
1672 return -ENOMEM;
1673
1674 path->reada = 1;
1675 path->leave_spinning = 1;
1676 /* this will setup the path even if it fails to insert the back ref */
1677 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1678 path, bytenr, num_bytes, parent,
1679 root_objectid, owner, offset,
1680 refs_to_add, extent_op);
1681 if (ret == 0)
1682 goto out;
1683
1684 if (ret != -EAGAIN) {
1685 err = ret;
1686 goto out;
1687 }
1688
1689 leaf = path->nodes[0];
1690 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1691 refs = btrfs_extent_refs(leaf, item);
1692 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1693 if (extent_op)
1694 __run_delayed_extent_op(extent_op, leaf, item);
56bec294 1695
5d4f98a2 1696 btrfs_mark_buffer_dirty(leaf);
56bec294
CM
1697 btrfs_release_path(root->fs_info->extent_root, path);
1698
1699 path->reada = 1;
b9473439
CM
1700 path->leave_spinning = 1;
1701
56bec294
CM
1702 /* now insert the actual backref */
1703 ret = insert_extent_backref(trans, root->fs_info->extent_root,
5d4f98a2
YZ
1704 path, bytenr, parent, root_objectid,
1705 owner, offset, refs_to_add);
56bec294 1706 BUG_ON(ret);
5d4f98a2 1707out:
56bec294 1708 btrfs_free_path(path);
5d4f98a2 1709 return err;
56bec294
CM
1710}
1711
5d4f98a2
YZ
1712static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1713 struct btrfs_root *root,
1714 struct btrfs_delayed_ref_node *node,
1715 struct btrfs_delayed_extent_op *extent_op,
1716 int insert_reserved)
56bec294 1717{
5d4f98a2
YZ
1718 int ret = 0;
1719 struct btrfs_delayed_data_ref *ref;
1720 struct btrfs_key ins;
1721 u64 parent = 0;
1722 u64 ref_root = 0;
1723 u64 flags = 0;
1724
1725 ins.objectid = node->bytenr;
1726 ins.offset = node->num_bytes;
1727 ins.type = BTRFS_EXTENT_ITEM_KEY;
1728
1729 ref = btrfs_delayed_node_to_data_ref(node);
1730 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1731 parent = ref->parent;
1732 else
1733 ref_root = ref->root;
1734
1735 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1736 if (extent_op) {
1737 BUG_ON(extent_op->update_key);
1738 flags |= extent_op->flags_to_set;
1739 }
1740 ret = alloc_reserved_file_extent(trans, root,
1741 parent, ref_root, flags,
1742 ref->objectid, ref->offset,
1743 &ins, node->ref_mod);
5d4f98a2
YZ
1744 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1745 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1746 node->num_bytes, parent,
1747 ref_root, ref->objectid,
1748 ref->offset, node->ref_mod,
1749 extent_op);
1750 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1751 ret = __btrfs_free_extent(trans, root, node->bytenr,
1752 node->num_bytes, parent,
1753 ref_root, ref->objectid,
1754 ref->offset, node->ref_mod,
1755 extent_op);
1756 } else {
1757 BUG();
1758 }
1759 return ret;
1760}
1761
1762static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1763 struct extent_buffer *leaf,
1764 struct btrfs_extent_item *ei)
1765{
1766 u64 flags = btrfs_extent_flags(leaf, ei);
1767 if (extent_op->update_flags) {
1768 flags |= extent_op->flags_to_set;
1769 btrfs_set_extent_flags(leaf, ei, flags);
1770 }
1771
1772 if (extent_op->update_key) {
1773 struct btrfs_tree_block_info *bi;
1774 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1775 bi = (struct btrfs_tree_block_info *)(ei + 1);
1776 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1777 }
1778}
1779
1780static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1781 struct btrfs_root *root,
1782 struct btrfs_delayed_ref_node *node,
1783 struct btrfs_delayed_extent_op *extent_op)
1784{
1785 struct btrfs_key key;
1786 struct btrfs_path *path;
1787 struct btrfs_extent_item *ei;
1788 struct extent_buffer *leaf;
1789 u32 item_size;
56bec294 1790 int ret;
5d4f98a2
YZ
1791 int err = 0;
1792
1793 path = btrfs_alloc_path();
1794 if (!path)
1795 return -ENOMEM;
1796
1797 key.objectid = node->bytenr;
1798 key.type = BTRFS_EXTENT_ITEM_KEY;
1799 key.offset = node->num_bytes;
1800
1801 path->reada = 1;
1802 path->leave_spinning = 1;
1803 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1804 path, 0, 1);
1805 if (ret < 0) {
1806 err = ret;
1807 goto out;
1808 }
1809 if (ret > 0) {
1810 err = -EIO;
1811 goto out;
1812 }
1813
1814 leaf = path->nodes[0];
1815 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1816#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1817 if (item_size < sizeof(*ei)) {
1818 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1819 path, (u64)-1, 0);
1820 if (ret < 0) {
1821 err = ret;
1822 goto out;
1823 }
1824 leaf = path->nodes[0];
1825 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1826 }
1827#endif
1828 BUG_ON(item_size < sizeof(*ei));
1829 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1830 __run_delayed_extent_op(extent_op, leaf, ei);
56bec294 1831
5d4f98a2
YZ
1832 btrfs_mark_buffer_dirty(leaf);
1833out:
1834 btrfs_free_path(path);
1835 return err;
56bec294
CM
1836}
1837
5d4f98a2
YZ
1838static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1839 struct btrfs_root *root,
1840 struct btrfs_delayed_ref_node *node,
1841 struct btrfs_delayed_extent_op *extent_op,
1842 int insert_reserved)
56bec294
CM
1843{
1844 int ret = 0;
5d4f98a2
YZ
1845 struct btrfs_delayed_tree_ref *ref;
1846 struct btrfs_key ins;
1847 u64 parent = 0;
1848 u64 ref_root = 0;
56bec294 1849
5d4f98a2
YZ
1850 ins.objectid = node->bytenr;
1851 ins.offset = node->num_bytes;
1852 ins.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 1853
5d4f98a2
YZ
1854 ref = btrfs_delayed_node_to_tree_ref(node);
1855 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1856 parent = ref->parent;
1857 else
1858 ref_root = ref->root;
1859
1860 BUG_ON(node->ref_mod != 1);
1861 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1862 BUG_ON(!extent_op || !extent_op->update_flags ||
1863 !extent_op->update_key);
1864 ret = alloc_reserved_tree_block(trans, root,
1865 parent, ref_root,
1866 extent_op->flags_to_set,
1867 &extent_op->key,
1868 ref->level, &ins);
5d4f98a2
YZ
1869 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1870 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1871 node->num_bytes, parent, ref_root,
1872 ref->level, 0, 1, extent_op);
1873 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1874 ret = __btrfs_free_extent(trans, root, node->bytenr,
1875 node->num_bytes, parent, ref_root,
1876 ref->level, 0, 1, extent_op);
1877 } else {
1878 BUG();
1879 }
56bec294
CM
1880 return ret;
1881}
1882
5d4f98a2 1883
56bec294 1884/* helper function to actually process a single delayed ref entry */
5d4f98a2
YZ
1885static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1886 struct btrfs_root *root,
1887 struct btrfs_delayed_ref_node *node,
1888 struct btrfs_delayed_extent_op *extent_op,
1889 int insert_reserved)
56bec294
CM
1890{
1891 int ret;
5d4f98a2 1892 if (btrfs_delayed_ref_is_head(node)) {
56bec294
CM
1893 struct btrfs_delayed_ref_head *head;
1894 /*
1895 * we've hit the end of the chain and we were supposed
1896 * to insert this extent into the tree. But, it got
1897 * deleted before we ever needed to insert it, so all
1898 * we have to do is clean up the accounting
1899 */
5d4f98a2
YZ
1900 BUG_ON(extent_op);
1901 head = btrfs_delayed_node_to_head(node);
56bec294 1902 if (insert_reserved) {
11833d66
YZ
1903 int mark_free = 0;
1904 struct extent_buffer *must_clean = NULL;
1905
1906 ret = pin_down_bytes(trans, root, NULL,
1907 node->bytenr, node->num_bytes,
1908 head->is_data, 1, &must_clean);
1909 if (ret > 0)
1910 mark_free = 1;
1911
1912 if (must_clean) {
1913 clean_tree_block(NULL, root, must_clean);
1914 btrfs_tree_unlock(must_clean);
1915 free_extent_buffer(must_clean);
1916 }
5d4f98a2
YZ
1917 if (head->is_data) {
1918 ret = btrfs_del_csums(trans, root,
1919 node->bytenr,
1920 node->num_bytes);
1921 BUG_ON(ret);
1922 }
11833d66
YZ
1923 if (mark_free) {
1924 ret = btrfs_free_reserved_extent(root,
1925 node->bytenr,
1926 node->num_bytes);
1927 BUG_ON(ret);
1928 }
56bec294 1929 }
56bec294
CM
1930 mutex_unlock(&head->mutex);
1931 return 0;
1932 }
1933
5d4f98a2
YZ
1934 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1935 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1936 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1937 insert_reserved);
1938 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1939 node->type == BTRFS_SHARED_DATA_REF_KEY)
1940 ret = run_delayed_data_ref(trans, root, node, extent_op,
1941 insert_reserved);
1942 else
1943 BUG();
1944 return ret;
56bec294
CM
1945}
1946
1947static noinline struct btrfs_delayed_ref_node *
1948select_delayed_ref(struct btrfs_delayed_ref_head *head)
1949{
1950 struct rb_node *node;
1951 struct btrfs_delayed_ref_node *ref;
1952 int action = BTRFS_ADD_DELAYED_REF;
1953again:
1954 /*
1955 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
1956 * this prevents the ref count from going down to zero when
1957 * there are still pending delayed refs.
1958 */
1959 node = rb_prev(&head->node.rb_node);
1960 while (1) {
1961 if (!node)
1962 break;
1963 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1964 rb_node);
1965 if (ref->bytenr != head->node.bytenr)
1966 break;
5d4f98a2 1967 if (ref->action == action)
56bec294
CM
1968 return ref;
1969 node = rb_prev(node);
1970 }
1971 if (action == BTRFS_ADD_DELAYED_REF) {
1972 action = BTRFS_DROP_DELAYED_REF;
1973 goto again;
1974 }
1975 return NULL;
1976}
1977
c3e69d58
CM
1978static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1979 struct btrfs_root *root,
1980 struct list_head *cluster)
56bec294 1981{
56bec294
CM
1982 struct btrfs_delayed_ref_root *delayed_refs;
1983 struct btrfs_delayed_ref_node *ref;
1984 struct btrfs_delayed_ref_head *locked_ref = NULL;
5d4f98a2 1985 struct btrfs_delayed_extent_op *extent_op;
56bec294 1986 int ret;
c3e69d58 1987 int count = 0;
56bec294 1988 int must_insert_reserved = 0;
56bec294
CM
1989
1990 delayed_refs = &trans->transaction->delayed_refs;
56bec294
CM
1991 while (1) {
1992 if (!locked_ref) {
c3e69d58
CM
1993 /* pick a new head ref from the cluster list */
1994 if (list_empty(cluster))
56bec294 1995 break;
56bec294 1996
c3e69d58
CM
1997 locked_ref = list_entry(cluster->next,
1998 struct btrfs_delayed_ref_head, cluster);
1999
2000 /* grab the lock that says we are going to process
2001 * all the refs for this head */
2002 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2003
2004 /*
2005 * we may have dropped the spin lock to get the head
2006 * mutex lock, and that might have given someone else
2007 * time to free the head. If that's true, it has been
2008 * removed from our list and we can move on.
2009 */
2010 if (ret == -EAGAIN) {
2011 locked_ref = NULL;
2012 count++;
2013 continue;
56bec294
CM
2014 }
2015 }
a28ec197 2016
56bec294
CM
2017 /*
2018 * record the must insert reserved flag before we
2019 * drop the spin lock.
2020 */
2021 must_insert_reserved = locked_ref->must_insert_reserved;
2022 locked_ref->must_insert_reserved = 0;
7bb86316 2023
5d4f98a2
YZ
2024 extent_op = locked_ref->extent_op;
2025 locked_ref->extent_op = NULL;
2026
56bec294
CM
2027 /*
2028 * locked_ref is the head node, so we have to go one
2029 * node back for any delayed ref updates
2030 */
56bec294
CM
2031 ref = select_delayed_ref(locked_ref);
2032 if (!ref) {
2033 /* All delayed refs have been processed, go ahead
2034 * and send the head node to run_one_delayed_ref,
2035 * so that any accounting fixes can happen
2036 */
2037 ref = &locked_ref->node;
5d4f98a2
YZ
2038
2039 if (extent_op && must_insert_reserved) {
2040 kfree(extent_op);
2041 extent_op = NULL;
2042 }
2043
2044 if (extent_op) {
2045 spin_unlock(&delayed_refs->lock);
2046
2047 ret = run_delayed_extent_op(trans, root,
2048 ref, extent_op);
2049 BUG_ON(ret);
2050 kfree(extent_op);
2051
2052 cond_resched();
2053 spin_lock(&delayed_refs->lock);
2054 continue;
2055 }
2056
c3e69d58 2057 list_del_init(&locked_ref->cluster);
56bec294
CM
2058 locked_ref = NULL;
2059 }
02217ed2 2060
56bec294
CM
2061 ref->in_tree = 0;
2062 rb_erase(&ref->rb_node, &delayed_refs->root);
2063 delayed_refs->num_entries--;
5d4f98a2 2064
56bec294 2065 spin_unlock(&delayed_refs->lock);
925baedd 2066
5d4f98a2 2067 ret = run_one_delayed_ref(trans, root, ref, extent_op,
56bec294
CM
2068 must_insert_reserved);
2069 BUG_ON(ret);
eb099670 2070
5d4f98a2
YZ
2071 btrfs_put_delayed_ref(ref);
2072 kfree(extent_op);
c3e69d58 2073 count++;
5d4f98a2 2074
c3e69d58
CM
2075 cond_resched();
2076 spin_lock(&delayed_refs->lock);
2077 }
2078 return count;
2079}
2080
2081/*
2082 * this starts processing the delayed reference count updates and
2083 * extent insertions we have queued up so far. count can be
2084 * 0, which means to process everything in the tree at the start
2085 * of the run (but not newly added entries), or it can be some target
2086 * number you'd like to process.
2087 */
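/*
 * Illustrative values (derived from the logic below, not a separate API):
 *	count == 0                  process what was queued at the start of
 *	                            the run (internally bumped to
 *	                            num_entries * 2)
 *	count == (unsigned long)-1  keep going until the whole tree is empty
 */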
2088int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2089 struct btrfs_root *root, unsigned long count)
2090{
2091 struct rb_node *node;
2092 struct btrfs_delayed_ref_root *delayed_refs;
2093 struct btrfs_delayed_ref_node *ref;
2094 struct list_head cluster;
2095 int ret;
2096 int run_all = count == (unsigned long)-1;
2097 int run_most = 0;
2098
2099 if (root == root->fs_info->extent_root)
2100 root = root->fs_info->tree_root;
2101
2102 delayed_refs = &trans->transaction->delayed_refs;
2103 INIT_LIST_HEAD(&cluster);
2104again:
2105 spin_lock(&delayed_refs->lock);
2106 if (count == 0) {
2107 count = delayed_refs->num_entries * 2;
2108 run_most = 1;
2109 }
2110 while (1) {
2111 if (!(run_all || run_most) &&
2112 delayed_refs->num_heads_ready < 64)
2113 break;
eb099670 2114
56bec294 2115 /*
c3e69d58
CM
2116 * go find something we can process in the rbtree. We start at
2117 * the beginning of the tree, and then build a cluster
2118 * of refs to process starting at the first one we are able to
2119 * lock
56bec294 2120 */
c3e69d58
CM
2121 ret = btrfs_find_ref_cluster(trans, &cluster,
2122 delayed_refs->run_delayed_start);
2123 if (ret)
56bec294
CM
2124 break;
2125
c3e69d58
CM
2126 ret = run_clustered_refs(trans, root, &cluster);
2127 BUG_ON(ret < 0);
2128
2129 count -= min_t(unsigned long, ret, count);
2130
2131 if (count == 0)
2132 break;
eb099670 2133 }
c3e69d58 2134
56bec294 2135 if (run_all) {
56bec294 2136 node = rb_first(&delayed_refs->root);
c3e69d58 2137 if (!node)
56bec294 2138 goto out;
c3e69d58 2139 count = (unsigned long)-1;
e9d0b13b 2140
56bec294
CM
2141 while (node) {
2142 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2143 rb_node);
2144 if (btrfs_delayed_ref_is_head(ref)) {
2145 struct btrfs_delayed_ref_head *head;
5caf2a00 2146
56bec294
CM
2147 head = btrfs_delayed_node_to_head(ref);
2148 atomic_inc(&ref->refs);
2149
2150 spin_unlock(&delayed_refs->lock);
2151 mutex_lock(&head->mutex);
2152 mutex_unlock(&head->mutex);
2153
2154 btrfs_put_delayed_ref(ref);
1887be66 2155 cond_resched();
56bec294
CM
2156 goto again;
2157 }
2158 node = rb_next(node);
2159 }
2160 spin_unlock(&delayed_refs->lock);
56bec294
CM
2161 schedule_timeout(1);
2162 goto again;
5f39d397 2163 }
54aa1f4d 2164out:
c3e69d58 2165 spin_unlock(&delayed_refs->lock);
a28ec197
CM
2166 return 0;
2167}
2168
5d4f98a2
YZ
2169int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2170 struct btrfs_root *root,
2171 u64 bytenr, u64 num_bytes, u64 flags,
2172 int is_data)
2173{
2174 struct btrfs_delayed_extent_op *extent_op;
2175 int ret;
2176
2177 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2178 if (!extent_op)
2179 return -ENOMEM;
2180
2181 extent_op->flags_to_set = flags;
2182 extent_op->update_flags = 1;
2183 extent_op->update_key = 0;
2184 extent_op->is_data = is_data ? 1 : 0;
2185
2186 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2187 if (ret)
2188 kfree(extent_op);
2189 return ret;
2190}
2191
2192static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2193 struct btrfs_root *root,
2194 struct btrfs_path *path,
2195 u64 objectid, u64 offset, u64 bytenr)
2196{
2197 struct btrfs_delayed_ref_head *head;
2198 struct btrfs_delayed_ref_node *ref;
2199 struct btrfs_delayed_data_ref *data_ref;
2200 struct btrfs_delayed_ref_root *delayed_refs;
2201 struct rb_node *node;
2202 int ret = 0;
2203
2204 ret = -ENOENT;
2205 delayed_refs = &trans->transaction->delayed_refs;
2206 spin_lock(&delayed_refs->lock);
2207 head = btrfs_find_delayed_ref_head(trans, bytenr);
2208 if (!head)
2209 goto out;
2210
2211 if (!mutex_trylock(&head->mutex)) {
2212 atomic_inc(&head->node.refs);
2213 spin_unlock(&delayed_refs->lock);
2214
2215 btrfs_release_path(root->fs_info->extent_root, path);
2216
2217 mutex_lock(&head->mutex);
2218 mutex_unlock(&head->mutex);
2219 btrfs_put_delayed_ref(&head->node);
2220 return -EAGAIN;
2221 }
2222
2223 node = rb_prev(&head->node.rb_node);
2224 if (!node)
2225 goto out_unlock;
2226
2227 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2228
2229 if (ref->bytenr != bytenr)
2230 goto out_unlock;
2231
2232 ret = 1;
2233 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2234 goto out_unlock;
2235
2236 data_ref = btrfs_delayed_node_to_data_ref(ref);
2237
2238 node = rb_prev(node);
2239 if (node) {
2240 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2241 if (ref->bytenr == bytenr)
2242 goto out_unlock;
2243 }
2244
2245 if (data_ref->root != root->root_key.objectid ||
2246 data_ref->objectid != objectid || data_ref->offset != offset)
2247 goto out_unlock;
2248
2249 ret = 0;
2250out_unlock:
2251 mutex_unlock(&head->mutex);
2252out:
2253 spin_unlock(&delayed_refs->lock);
2254 return ret;
2255}
2256
2257static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2258 struct btrfs_root *root,
2259 struct btrfs_path *path,
2260 u64 objectid, u64 offset, u64 bytenr)
be20aa9d
CM
2261{
2262 struct btrfs_root *extent_root = root->fs_info->extent_root;
f321e491 2263 struct extent_buffer *leaf;
5d4f98a2
YZ
2264 struct btrfs_extent_data_ref *ref;
2265 struct btrfs_extent_inline_ref *iref;
2266 struct btrfs_extent_item *ei;
f321e491 2267 struct btrfs_key key;
5d4f98a2 2268 u32 item_size;
be20aa9d 2269 int ret;
925baedd 2270
be20aa9d 2271 key.objectid = bytenr;
31840ae1 2272 key.offset = (u64)-1;
f321e491 2273 key.type = BTRFS_EXTENT_ITEM_KEY;
be20aa9d 2274
be20aa9d
CM
2275 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2276 if (ret < 0)
2277 goto out;
2278 BUG_ON(ret == 0);
80ff3856
YZ
2279
2280 ret = -ENOENT;
2281 if (path->slots[0] == 0)
31840ae1 2282 goto out;
be20aa9d 2283
31840ae1 2284 path->slots[0]--;
f321e491 2285 leaf = path->nodes[0];
5d4f98a2 2286 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
be20aa9d 2287
5d4f98a2 2288 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
be20aa9d 2289 goto out;
f321e491 2290
5d4f98a2
YZ
2291 ret = 1;
2292 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2293#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2294 if (item_size < sizeof(*ei)) {
2295 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2296 goto out;
2297 }
2298#endif
2299 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
bd09835d 2300
5d4f98a2
YZ
2301 if (item_size != sizeof(*ei) +
2302 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2303 goto out;
be20aa9d 2304
5d4f98a2
YZ
2305 if (btrfs_extent_generation(leaf, ei) <=
2306 btrfs_root_last_snapshot(&root->root_item))
2307 goto out;
2308
2309 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2310 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2311 BTRFS_EXTENT_DATA_REF_KEY)
2312 goto out;
2313
2314 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2315 if (btrfs_extent_refs(leaf, ei) !=
2316 btrfs_extent_data_ref_count(leaf, ref) ||
2317 btrfs_extent_data_ref_root(leaf, ref) !=
2318 root->root_key.objectid ||
2319 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2320 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2321 goto out;
2322
2323 ret = 0;
2324out:
2325 return ret;
2326}
2327
2328int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2329 struct btrfs_root *root,
2330 u64 objectid, u64 offset, u64 bytenr)
2331{
2332 struct btrfs_path *path;
2333 int ret;
2334 int ret2;
2335
2336 path = btrfs_alloc_path();
2337 if (!path)
2338 return -ENOENT;
2339
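	/*
	 * Check the committed extent tree first, then any pending delayed
	 * refs; check_delayed_ref() returns -EAGAIN when it had to drop the
	 * path to wait on the ref head mutex, so retry until it settles.
	 */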
2340 do {
2341 ret = check_committed_ref(trans, root, path, objectid,
2342 offset, bytenr);
2343 if (ret && ret != -ENOENT)
f321e491 2344 goto out;
80ff3856 2345
5d4f98a2
YZ
2346 ret2 = check_delayed_ref(trans, root, path, objectid,
2347 offset, bytenr);
2348 } while (ret2 == -EAGAIN);
2349
2350 if (ret2 && ret2 != -ENOENT) {
2351 ret = ret2;
2352 goto out;
f321e491 2353 }
5d4f98a2
YZ
2354
2355 if (ret != -ENOENT || ret2 != -ENOENT)
2356 ret = 0;
be20aa9d 2357out:
80ff3856 2358 btrfs_free_path(path);
f321e491 2359 return ret;
be20aa9d 2360}
c5739bba 2361
5d4f98a2 2362#if 0
31840ae1
ZY
2363int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2364 struct extent_buffer *buf, u32 nr_extents)
02217ed2 2365{
5f39d397 2366 struct btrfs_key key;
6407bf6d 2367 struct btrfs_file_extent_item *fi;
e4657689
ZY
2368 u64 root_gen;
2369 u32 nritems;
02217ed2 2370 int i;
db94535d 2371 int level;
31840ae1 2372 int ret = 0;
e4657689 2373 int shared = 0;
a28ec197 2374
3768f368 2375 if (!root->ref_cows)
a28ec197 2376 return 0;
5f39d397 2377
e4657689
ZY
2378 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2379 shared = 0;
2380 root_gen = root->root_key.offset;
2381 } else {
2382 shared = 1;
2383 root_gen = trans->transid - 1;
2384 }
2385
db94535d 2386 level = btrfs_header_level(buf);
5f39d397 2387 nritems = btrfs_header_nritems(buf);
4a096752 2388
31840ae1 2389 if (level == 0) {
31153d81
YZ
2390 struct btrfs_leaf_ref *ref;
2391 struct btrfs_extent_info *info;
2392
31840ae1 2393 ref = btrfs_alloc_leaf_ref(root, nr_extents);
31153d81 2394 if (!ref) {
31840ae1 2395 ret = -ENOMEM;
31153d81
YZ
2396 goto out;
2397 }
2398
e4657689 2399 ref->root_gen = root_gen;
31153d81
YZ
2400 ref->bytenr = buf->start;
2401 ref->owner = btrfs_header_owner(buf);
2402 ref->generation = btrfs_header_generation(buf);
31840ae1 2403 ref->nritems = nr_extents;
31153d81 2404 info = ref->extents;
bcc63abb 2405
31840ae1 2406 for (i = 0; nr_extents > 0 && i < nritems; i++) {
31153d81
YZ
2407 u64 disk_bytenr;
2408 btrfs_item_key_to_cpu(buf, &key, i);
2409 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2410 continue;
2411 fi = btrfs_item_ptr(buf, i,
2412 struct btrfs_file_extent_item);
2413 if (btrfs_file_extent_type(buf, fi) ==
2414 BTRFS_FILE_EXTENT_INLINE)
2415 continue;
2416 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2417 if (disk_bytenr == 0)
2418 continue;
2419
2420 info->bytenr = disk_bytenr;
2421 info->num_bytes =
2422 btrfs_file_extent_disk_num_bytes(buf, fi);
2423 info->objectid = key.objectid;
2424 info->offset = key.offset;
2425 info++;
2426 }
2427
e4657689 2428 ret = btrfs_add_leaf_ref(root, ref, shared);
5b84e8d6
YZ
2429 if (ret == -EEXIST && shared) {
2430 struct btrfs_leaf_ref *old;
2431 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2432 BUG_ON(!old);
2433 btrfs_remove_leaf_ref(root, old);
2434 btrfs_free_leaf_ref(root, old);
2435 ret = btrfs_add_leaf_ref(root, ref, shared);
2436 }
31153d81 2437 WARN_ON(ret);
bcc63abb 2438 btrfs_free_leaf_ref(root, ref);
31153d81
YZ
2439 }
2440out:
31840ae1
ZY
2441 return ret;
2442}
2443
b7a9f29f
CM
2444/* when a block goes through cow, we update the reference counts of
2445 * everything that block points to. The internal pointers of the block
2446 * can be in just about any order, and it is likely to have clusters of
2447 * things that are close together and clusters of things that are not.
2448 *
2449 * To help reduce the seeks that come with updating all of these reference
2450 * counts, sort them by byte number before actual updates are done.
2451 *
2452 * struct refsort is used to match byte number to slot in the btree block.
2453 * we sort based on the byte number and then use the slot to actually
2454 * find the item.
bd56b302
CM
2455 *
2456 * struct refsort is smaller than struct btrfs_item and smaller than
2457 * struct btrfs_key_ptr. Since we're currently limited to the page size
2458 * for a btree block, there's no way for a kmalloc of refsorts for a
2459 * single node to be bigger than a page.
b7a9f29f
CM
2460 */
2461struct refsort {
2462 u64 bytenr;
2463 u32 slot;
2464};
2465
2466/*
2467 * for passing into sort()
2468 */
2469static int refsort_cmp(const void *a_void, const void *b_void)
2470{
2471 const struct refsort *a = a_void;
2472 const struct refsort *b = b_void;
2473
2474 if (a->bytenr < b->bytenr)
2475 return -1;
2476 if (a->bytenr > b->bytenr)
2477 return 1;
2478 return 0;
2479}
5d4f98a2 2480#endif
b7a9f29f 2481
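/*
 * Walk every pointer in @buf and either add or drop one reference on what
 * it points to: file extent disk bytenrs in leaves, child block pointers
 * in nodes.  @inc selects between btrfs_inc_extent_ref() and
 * btrfs_free_extent(); btrfs_inc_ref() and btrfs_dec_ref() below are thin
 * wrappers that pick the direction.
 */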
5d4f98a2 2482static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
b7a9f29f 2483 struct btrfs_root *root,
5d4f98a2
YZ
2484 struct extent_buffer *buf,
2485 int full_backref, int inc)
31840ae1
ZY
2486{
2487 u64 bytenr;
5d4f98a2
YZ
2488 u64 num_bytes;
2489 u64 parent;
31840ae1 2490 u64 ref_root;
31840ae1 2491 u32 nritems;
31840ae1
ZY
2492 struct btrfs_key key;
2493 struct btrfs_file_extent_item *fi;
2494 int i;
2495 int level;
2496 int ret = 0;
31840ae1 2497 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
5d4f98a2 2498 u64, u64, u64, u64, u64, u64);
31840ae1
ZY
2499
2500 ref_root = btrfs_header_owner(buf);
31840ae1
ZY
2501 nritems = btrfs_header_nritems(buf);
2502 level = btrfs_header_level(buf);
2503
5d4f98a2
YZ
2504 if (!root->ref_cows && level == 0)
2505 return 0;
31840ae1 2506
5d4f98a2
YZ
2507 if (inc)
2508 process_func = btrfs_inc_extent_ref;
2509 else
2510 process_func = btrfs_free_extent;
31840ae1 2511
5d4f98a2
YZ
2512 if (full_backref)
2513 parent = buf->start;
2514 else
2515 parent = 0;
2516
2517 for (i = 0; i < nritems; i++) {
31840ae1 2518 if (level == 0) {
5d4f98a2 2519 btrfs_item_key_to_cpu(buf, &key, i);
31840ae1
ZY
2520 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2521 continue;
5d4f98a2 2522 fi = btrfs_item_ptr(buf, i,
31840ae1
ZY
2523 struct btrfs_file_extent_item);
2524 if (btrfs_file_extent_type(buf, fi) ==
2525 BTRFS_FILE_EXTENT_INLINE)
2526 continue;
2527 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2528 if (bytenr == 0)
2529 continue;
5d4f98a2
YZ
2530
2531 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2532 key.offset -= btrfs_file_extent_offset(buf, fi);
2533 ret = process_func(trans, root, bytenr, num_bytes,
2534 parent, ref_root, key.objectid,
2535 key.offset);
31840ae1
ZY
2536 if (ret)
2537 goto fail;
2538 } else {
5d4f98a2
YZ
2539 bytenr = btrfs_node_blockptr(buf, i);
2540 num_bytes = btrfs_level_size(root, level - 1);
2541 ret = process_func(trans, root, bytenr, num_bytes,
2542 parent, ref_root, level - 1, 0);
31840ae1
ZY
2543 if (ret)
2544 goto fail;
2545 }
2546 }
2547 return 0;
2548fail:
5d4f98a2
YZ
2549 BUG();
2550 return ret;
2551}
2552
2553int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2554 struct extent_buffer *buf, int full_backref)
2555{
2556 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2557}
2558
2559int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2560 struct extent_buffer *buf, int full_backref)
2561{
2562 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
31840ae1
ZY
2563}
2564
9078a3e1
CM
2565static int write_one_cache_group(struct btrfs_trans_handle *trans,
2566 struct btrfs_root *root,
2567 struct btrfs_path *path,
2568 struct btrfs_block_group_cache *cache)
2569{
2570 int ret;
9078a3e1 2571 struct btrfs_root *extent_root = root->fs_info->extent_root;
5f39d397
CM
2572 unsigned long bi;
2573 struct extent_buffer *leaf;
9078a3e1 2574
9078a3e1 2575 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
54aa1f4d
CM
2576 if (ret < 0)
2577 goto fail;
9078a3e1 2578 BUG_ON(ret);
5f39d397
CM
2579
2580 leaf = path->nodes[0];
2581 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2582 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2583 btrfs_mark_buffer_dirty(leaf);
9078a3e1 2584 btrfs_release_path(extent_root, path);
54aa1f4d 2585fail:
9078a3e1
CM
2586 if (ret)
2587 return ret;
9078a3e1
CM
2588 return 0;
2589
2590}
2591
4a8c9a62
YZ
2592static struct btrfs_block_group_cache *
2593next_block_group(struct btrfs_root *root,
2594 struct btrfs_block_group_cache *cache)
2595{
2596 struct rb_node *node;
2597 spin_lock(&root->fs_info->block_group_cache_lock);
2598 node = rb_next(&cache->cache_node);
2599 btrfs_put_block_group(cache);
2600 if (node) {
2601 cache = rb_entry(node, struct btrfs_block_group_cache,
2602 cache_node);
11dfe35a 2603 btrfs_get_block_group(cache);
4a8c9a62
YZ
2604 } else
2605 cache = NULL;
2606 spin_unlock(&root->fs_info->block_group_cache_lock);
2607 return cache;
2608}
2609
96b5179d
CM
2610int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2611 struct btrfs_root *root)
9078a3e1 2612{
4a8c9a62 2613 struct btrfs_block_group_cache *cache;
9078a3e1 2614 int err = 0;
9078a3e1 2615 struct btrfs_path *path;
96b5179d 2616 u64 last = 0;
9078a3e1
CM
2617
2618 path = btrfs_alloc_path();
2619 if (!path)
2620 return -ENOMEM;
2621
d397712b 2622 while (1) {
4a8c9a62
YZ
2623 if (last == 0) {
2624 err = btrfs_run_delayed_refs(trans, root,
2625 (unsigned long)-1);
2626 BUG_ON(err);
0f9dd46c 2627 }
54aa1f4d 2628
4a8c9a62
YZ
2629 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2630 while (cache) {
2631 if (cache->dirty)
2632 break;
2633 cache = next_block_group(root, cache);
2634 }
2635 if (!cache) {
2636 if (last == 0)
2637 break;
2638 last = 0;
2639 continue;
2640 }
0f9dd46c 2641
e8569813 2642 cache->dirty = 0;
4a8c9a62 2643 last = cache->key.objectid + cache->key.offset;
0f9dd46c 2644
4a8c9a62
YZ
2645 err = write_one_cache_group(trans, root, path, cache);
2646 BUG_ON(err);
2647 btrfs_put_block_group(cache);
9078a3e1 2648 }
4a8c9a62 2649
9078a3e1 2650 btrfs_free_path(path);
4a8c9a62 2651 return 0;
9078a3e1
CM
2652}
2653
d2fb3437
YZ
2654int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2655{
2656 struct btrfs_block_group_cache *block_group;
2657 int readonly = 0;
2658
2659 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2660 if (!block_group || block_group->ro)
2661 readonly = 1;
2662 if (block_group)
fa9c0d79 2663 btrfs_put_block_group(block_group);
d2fb3437
YZ
2664 return readonly;
2665}
2666
593060d7
CM
2667static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2668 u64 total_bytes, u64 bytes_used,
2669 struct btrfs_space_info **space_info)
2670{
2671 struct btrfs_space_info *found;
b742bb82
YZ
2672 int i;
2673 int factor;
2674
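	/*
	 * DUP, RAID1 and RAID10 keep two copies of every byte, so the raw
	 * disk usage (disk_used) is tracked at twice the logical bytes_used.
	 */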
2675 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2676 BTRFS_BLOCK_GROUP_RAID10))
2677 factor = 2;
2678 else
2679 factor = 1;
593060d7
CM
2680
2681 found = __find_space_info(info, flags);
2682 if (found) {
25179201 2683 spin_lock(&found->lock);
593060d7
CM
2684 found->total_bytes += total_bytes;
2685 found->bytes_used += bytes_used;
b742bb82 2686 found->disk_used += bytes_used * factor;
8f18cf13 2687 found->full = 0;
25179201 2688 spin_unlock(&found->lock);
593060d7
CM
2689 *space_info = found;
2690 return 0;
2691 }
c146afad 2692 found = kzalloc(sizeof(*found), GFP_NOFS);
593060d7
CM
2693 if (!found)
2694 return -ENOMEM;
2695
b742bb82
YZ
2696 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2697 INIT_LIST_HEAD(&found->block_groups[i]);
80eb234a 2698 init_rwsem(&found->groups_sem);
0f9dd46c 2699 spin_lock_init(&found->lock);
b742bb82
YZ
2700 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
2701 BTRFS_BLOCK_GROUP_SYSTEM |
2702 BTRFS_BLOCK_GROUP_METADATA);
593060d7
CM
2703 found->total_bytes = total_bytes;
2704 found->bytes_used = bytes_used;
b742bb82 2705 found->disk_used = bytes_used * factor;
593060d7 2706 found->bytes_pinned = 0;
e8569813 2707 found->bytes_reserved = 0;
c146afad 2708 found->bytes_readonly = 0;
6a63209f 2709 found->bytes_delalloc = 0;
593060d7 2710 found->full = 0;
0ef3e66b 2711 found->force_alloc = 0;
593060d7 2712 *space_info = found;
4184ea7f 2713 list_add_rcu(&found->list, &info->space_info);
817d52f8 2714 atomic_set(&found->caching_threads, 0);
593060d7
CM
2715 return 0;
2716}
2717
8790d502
CM
2718static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2719{
2720 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
611f0e00 2721 BTRFS_BLOCK_GROUP_RAID1 |
321aecc6 2722 BTRFS_BLOCK_GROUP_RAID10 |
611f0e00 2723 BTRFS_BLOCK_GROUP_DUP);
8790d502
CM
2724 if (extra_flags) {
2725 if (flags & BTRFS_BLOCK_GROUP_DATA)
2726 fs_info->avail_data_alloc_bits |= extra_flags;
2727 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2728 fs_info->avail_metadata_alloc_bits |= extra_flags;
2729 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2730 fs_info->avail_system_alloc_bits |= extra_flags;
2731 }
2732}
593060d7 2733
c146afad
YZ
2734static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2735{
2736 spin_lock(&cache->space_info->lock);
2737 spin_lock(&cache->lock);
2738 if (!cache->ro) {
2739 cache->space_info->bytes_readonly += cache->key.offset -
2740 btrfs_block_group_used(&cache->item);
2741 cache->ro = 1;
2742 }
2743 spin_unlock(&cache->lock);
2744 spin_unlock(&cache->space_info->lock);
2745}
2746
2b82032c 2747u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
ec44a35c 2748{
2b82032c 2749 u64 num_devices = root->fs_info->fs_devices->rw_devices;
a061fc8d
CM
2750
2751 if (num_devices == 1)
2752 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2753 if (num_devices < 4)
2754 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2755
ec44a35c
CM
2756 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2757 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
a061fc8d 2758 BTRFS_BLOCK_GROUP_RAID10))) {
ec44a35c 2759 flags &= ~BTRFS_BLOCK_GROUP_DUP;
a061fc8d 2760 }
ec44a35c
CM
2761
2762 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
a061fc8d 2763 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
ec44a35c 2764 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
a061fc8d 2765 }
ec44a35c
CM
2766
2767 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2768 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2769 (flags & BTRFS_BLOCK_GROUP_RAID10) |
2770 (flags & BTRFS_BLOCK_GROUP_DUP)))
2771 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2772 return flags;
2773}
2774
b742bb82 2775static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
6a63209f 2776{
b742bb82
YZ
2777 if (flags & BTRFS_BLOCK_GROUP_DATA)
2778 flags |= root->fs_info->avail_data_alloc_bits &
2779 root->fs_info->data_alloc_profile;
2780 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2781 flags |= root->fs_info->avail_system_alloc_bits &
2782 root->fs_info->system_alloc_profile;
2783 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
2784 flags |= root->fs_info->avail_metadata_alloc_bits &
2785 root->fs_info->metadata_alloc_profile;
2786 return btrfs_reduce_alloc_profile(root, flags);
2787}
2788
2789static u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
2790{
2791 u64 flags;
6a63209f 2792
b742bb82
YZ
2793 if (data)
2794 flags = BTRFS_BLOCK_GROUP_DATA;
2795 else if (root == root->fs_info->chunk_root)
2796 flags = BTRFS_BLOCK_GROUP_SYSTEM;
2797 else
2798 flags = BTRFS_BLOCK_GROUP_METADATA;
2799
2800 return get_alloc_profile(root, flags);
6a63209f
JB
2801}
2802
2803void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2804{
2805 u64 alloc_target;
2806
2807 alloc_target = btrfs_get_alloc_profile(root, 1);
2808 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2809 alloc_target);
2810}
2811
9ed74f2d
JB
2812static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2813{
2814 u64 num_bytes;
2815 int level;
2816
2817 level = BTRFS_MAX_LEVEL - 2;
2818 /*
2819 * NOTE: these calculations are absolutely the worst possible case.
2820 * This assumes that _every_ item we insert will require a new leaf, and
2821 * that the tree has grown to its maximum level size.
2822 */
2823
2824 /*
2825 * for every item we insert we could insert both an extent item and an
2826 * extent ref item. Then for every item we insert, we will need to cow
2827 * both the original leaf, plus the leaf to the left and right of it.
2828 *
2829 * Unless we are talking about the extent root, then we just want the
2830 * number of items * 2, since we just need the extent item plus its ref.
2831 */
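	/*
	 * Worked example (illustrative): for a non-extent-root and
	 * num_items = 1 the branch below yields (1 + 2 * 1) * 3 = 9 leaves,
	 * which the final calculation expands to
	 * 9 * leafsize + 9 * (level * 2) * nodesize.
	 */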
2832 if (root == root->fs_info->extent_root)
2833 num_bytes = num_items * 2;
2834 else
2835 num_bytes = (num_items + (2 * num_items)) * 3;
2836
2837 /*
2838 * num_bytes is the total number of leaves we could need times the leaf
2839 * size, and then for every leaf we could end up cow'ing 2 nodes per
2840 * level, down to the leaf level.
2841 */
2842 num_bytes = (num_bytes * root->leafsize) +
2843 (num_bytes * (level * 2)) * root->nodesize;
2844
2845 return num_bytes;
2846}
2847
6a63209f 2848/*
9ed74f2d
JB
2849 * Unreserve metadata space for delalloc. If we have fewer reserved credits than
2850 * we have extents, this function does nothing.
6a63209f 2851 */
9ed74f2d
JB
2852int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2853 struct inode *inode, int num_items)
6a63209f
JB
2854{
2855 struct btrfs_fs_info *info = root->fs_info;
2856 struct btrfs_space_info *meta_sinfo;
9ed74f2d
JB
2857 u64 num_bytes;
2858 u64 alloc_target;
2859 bool bug = false;
6a63209f
JB
2860
2861 /* get the space info for where the metadata will live */
2862 alloc_target = btrfs_get_alloc_profile(root, 0);
2863 meta_sinfo = __find_space_info(info, alloc_target);
2864
9ed74f2d
JB
2865 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2866 num_items);
2867
6a63209f 2868 spin_lock(&meta_sinfo->lock);
32c00aff
JB
2869 spin_lock(&BTRFS_I(inode)->accounting_lock);
2870 if (BTRFS_I(inode)->reserved_extents <=
2871 BTRFS_I(inode)->outstanding_extents) {
2872 spin_unlock(&BTRFS_I(inode)->accounting_lock);
9ed74f2d
JB
2873 spin_unlock(&meta_sinfo->lock);
2874 return 0;
2875 }
32c00aff 2876 spin_unlock(&BTRFS_I(inode)->accounting_lock);
9ed74f2d 2877
287a0ab9 2878 BTRFS_I(inode)->reserved_extents -= num_items;
32c00aff 2879 BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
9ed74f2d
JB
2880
2881 if (meta_sinfo->bytes_delalloc < num_bytes) {
2882 bug = true;
2883 meta_sinfo->bytes_delalloc = 0;
2884 } else {
2885 meta_sinfo->bytes_delalloc -= num_bytes;
2886 }
2887 spin_unlock(&meta_sinfo->lock);
2888
2889 BUG_ON(bug);
2890
2891 return 0;
2892}
6a63209f 2893
9ed74f2d
JB
2894static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2895{
2896 u64 thresh;
2897
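	/*
	 * thresh becomes 80% of the metadata space not already committed to
	 * other uses; once delalloc reservations reach it we start forcing
	 * delalloc writeback.
	 */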
2898 thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2899 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2900 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2901 meta_sinfo->bytes_may_use;
6a63209f 2902
9ed74f2d
JB
2903 thresh = meta_sinfo->total_bytes - thresh;
2904 thresh *= 80;
6a63209f 2905 do_div(thresh, 100);
9ed74f2d
JB
2906 if (thresh <= meta_sinfo->bytes_delalloc)
2907 meta_sinfo->force_delalloc = 1;
2908 else
2909 meta_sinfo->force_delalloc = 0;
2910}
6a63209f 2911
9ed74f2d
JB
2912/*
2913 * Reserve metadata space for delalloc.
2914 */
2915int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
2916 struct inode *inode, int num_items)
2917{
2918 struct btrfs_fs_info *info = root->fs_info;
2919 struct btrfs_space_info *meta_sinfo;
2920 u64 num_bytes;
2921 u64 used;
2922 u64 alloc_target;
2923 int flushed = 0;
2924 int force_delalloc;
2925
2926 /* get the space info for where the metadata will live */
2927 alloc_target = btrfs_get_alloc_profile(root, 0);
2928 meta_sinfo = __find_space_info(info, alloc_target);
2929
2930 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2931 num_items);
2932again:
2933 spin_lock(&meta_sinfo->lock);
2934
2935 force_delalloc = meta_sinfo->force_delalloc;
2936
2937 if (unlikely(!meta_sinfo->bytes_root))
2938 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
2939
2940 if (!flushed)
2941 meta_sinfo->bytes_delalloc += num_bytes;
2942
2943 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2944 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2945 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2946 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
2947
2948 if (used > meta_sinfo->total_bytes) {
2949 flushed++;
2950
2951 if (flushed == 1) {
424499db
YZ
2952 if (maybe_allocate_chunk(NULL, root, meta_sinfo,
2953 num_bytes))
9ed74f2d
JB
2954 goto again;
2955 flushed++;
2956 } else {
4e06bdd6 2957 spin_unlock(&meta_sinfo->lock);
9ed74f2d 2958 }
4e06bdd6 2959
9ed74f2d
JB
2960 if (flushed == 2) {
2961 filemap_flush(inode->i_mapping);
2962 goto again;
2963 } else if (flushed == 3) {
5da9d01b 2964 shrink_delalloc(NULL, root, meta_sinfo, num_bytes);
4e06bdd6
JB
2965 goto again;
2966 }
9ed74f2d
JB
2967 spin_lock(&meta_sinfo->lock);
2968 meta_sinfo->bytes_delalloc -= num_bytes;
6a63209f 2969 spin_unlock(&meta_sinfo->lock);
9ed74f2d 2970 printk(KERN_ERR "enospc, has %d, reserved %d\n",
32c00aff
JB
2971 BTRFS_I(inode)->outstanding_extents,
2972 BTRFS_I(inode)->reserved_extents);
9ed74f2d
JB
2973 dump_space_info(meta_sinfo, 0, 0);
2974 return -ENOSPC;
2975 }
4e06bdd6 2976
287a0ab9 2977 BTRFS_I(inode)->reserved_extents += num_items;
9ed74f2d
JB
2978 check_force_delalloc(meta_sinfo);
2979 spin_unlock(&meta_sinfo->lock);
2980
2981 if (!flushed && force_delalloc)
2982 filemap_flush(inode->i_mapping);
2983
2984 return 0;
2985}
2986
2987/*
2988 * unreserve num_items number of items worth of metadata space. This needs to
2989 * be paired with btrfs_reserve_metadata_space.
2990 *
2991 * NOTE: if you have the option, run this _AFTER_ you do a
2992 * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
2993 * operations which will result in more used metadata, so we want to make sure we
2994 * can do that without issue.
2995 */
2996int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
2997{
2998 struct btrfs_fs_info *info = root->fs_info;
2999 struct btrfs_space_info *meta_sinfo;
3000 u64 num_bytes;
3001 u64 alloc_target;
3002 bool bug = false;
3003
3004 /* get the space info for where the metadata will live */
3005 alloc_target = btrfs_get_alloc_profile(root, 0);
3006 meta_sinfo = __find_space_info(info, alloc_target);
3007
3008 num_bytes = calculate_bytes_needed(root, num_items);
3009
3010 spin_lock(&meta_sinfo->lock);
3011 if (meta_sinfo->bytes_may_use < num_bytes) {
3012 bug = true;
3013 meta_sinfo->bytes_may_use = 0;
3014 } else {
3015 meta_sinfo->bytes_may_use -= num_bytes;
3016 }
3017 spin_unlock(&meta_sinfo->lock);
3018
3019 BUG_ON(bug);
3020
3021 return 0;
3022}
3023
3024/*
3025 * Reserve some metadata space for use. We'll calculate the worst case number
3026 * of bytes that would be needed to modify num_items number of items. If we
3027 * have space, fantastic, if not, you get -ENOSPC. Please call
3028 * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3029 * items you reserved, since whatever metadata you needed should have already
3030 * been allocated.
3031 *
3032 * This will commit the transaction to make more space if we don't have enough
3033 * metadata space. The only time we don't do this is if we're reserving space
3034 * inside of a transaction, in which case we will just return -ENOSPC and it is
3035 * the caller's responsibility to handle it properly.
3036 */
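/*
 * A rough sketch of the pairing described above (not taken from a real
 * caller):
 *
 *	ret = btrfs_reserve_metadata_space(root, 1);
 *	if (ret)
 *		return ret;
 *	...modify the single item...
 *	btrfs_unreserve_metadata_space(root, 1);
 */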
3037int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3038{
3039 struct btrfs_fs_info *info = root->fs_info;
3040 struct btrfs_space_info *meta_sinfo;
3041 u64 num_bytes;
3042 u64 used;
3043 u64 alloc_target;
3044 int retries = 0;
3045
3046 /* get the space info for where the metadata will live */
3047 alloc_target = btrfs_get_alloc_profile(root, 0);
3048 meta_sinfo = __find_space_info(info, alloc_target);
3049
3050 num_bytes = calculate_bytes_needed(root, num_items);
3051again:
3052 spin_lock(&meta_sinfo->lock);
3053
3054 if (unlikely(!meta_sinfo->bytes_root))
3055 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3056
3057 if (!retries)
3058 meta_sinfo->bytes_may_use += num_bytes;
3059
3060 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3061 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3062 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3063 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3064
3065 if (used > meta_sinfo->total_bytes) {
3066 retries++;
3067 if (retries == 1) {
424499db
YZ
3068 if (maybe_allocate_chunk(NULL, root, meta_sinfo,
3069 num_bytes))
9ed74f2d
JB
3070 goto again;
3071 retries++;
3072 } else {
3073 spin_unlock(&meta_sinfo->lock);
3074 }
3075
3076 if (retries == 2) {
5da9d01b 3077 shrink_delalloc(NULL, root, meta_sinfo, num_bytes);
4e06bdd6
JB
3078 goto again;
3079 }
9ed74f2d
JB
3080 spin_lock(&meta_sinfo->lock);
3081 meta_sinfo->bytes_may_use -= num_bytes;
3082 spin_unlock(&meta_sinfo->lock);
3083
3084 dump_space_info(meta_sinfo, 0, 0);
6a63209f
JB
3085 return -ENOSPC;
3086 }
9ed74f2d
JB
3087
3088 check_force_delalloc(meta_sinfo);
6a63209f
JB
3089 spin_unlock(&meta_sinfo->lock);
3090
3091 return 0;
3092}
3093
3094/*
3095 * This will check the space that the inode allocates from to make sure we have
3096 * enough space for bytes.
3097 */
3098int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3099 u64 bytes)
3100{
3101 struct btrfs_space_info *data_sinfo;
ab6e2410 3102 u64 used;
5da9d01b 3103 int ret = 0, committed = 0;
6a63209f
JB
3104
3105 /* make sure bytes are sectorsize aligned */
3106 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3107
3108 data_sinfo = BTRFS_I(inode)->space_info;
33b4d47f
CM
3109 if (!data_sinfo)
3110 goto alloc;
3111
6a63209f
JB
3112again:
3113 /* make sure we have enough space to handle the data first */
3114 spin_lock(&data_sinfo->lock);
ab6e2410
JB
3115 used = data_sinfo->bytes_used + data_sinfo->bytes_delalloc +
3116 data_sinfo->bytes_reserved + data_sinfo->bytes_pinned +
3117 data_sinfo->bytes_readonly + data_sinfo->bytes_may_use +
3118 data_sinfo->bytes_super;
3119
3120 if (used + bytes > data_sinfo->total_bytes) {
4e06bdd6
JB
3121 struct btrfs_trans_handle *trans;
3122
6a63209f
JB
3123 /*
3124 * if we don't have enough free bytes in this space then we need
3125 * to alloc a new chunk.
3126 */
3127 if (!data_sinfo->full) {
3128 u64 alloc_target;
6a63209f
JB
3129
3130 data_sinfo->force_alloc = 1;
3131 spin_unlock(&data_sinfo->lock);
33b4d47f 3132alloc:
6a63209f
JB
3133 alloc_target = btrfs_get_alloc_profile(root, 1);
3134 trans = btrfs_start_transaction(root, 1);
3135 if (!trans)
3136 return -ENOMEM;
3137
3138 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3139 bytes + 2 * 1024 * 1024,
3140 alloc_target, 0);
3141 btrfs_end_transaction(trans, root);
3142 if (ret)
3143 return ret;
33b4d47f
CM
3144
3145 if (!data_sinfo) {
3146 btrfs_set_inode_space_info(root, inode);
3147 data_sinfo = BTRFS_I(inode)->space_info;
3148 }
6a63209f
JB
3149 goto again;
3150 }
3151 spin_unlock(&data_sinfo->lock);
4e06bdd6
JB
3152
3153 /* commit the current transaction and try again */
dd7e0b7b 3154 if (!committed && !root->fs_info->open_ioctl_trans) {
4e06bdd6
JB
3155 committed = 1;
3156 trans = btrfs_join_transaction(root, 1);
3157 if (!trans)
3158 return -ENOMEM;
3159 ret = btrfs_commit_transaction(trans, root);
3160 if (ret)
3161 return ret;
3162 goto again;
3163 }
3164
6a63209f
JB
3165 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
3166 ", %llu bytes_used, %llu bytes_reserved, "
68f5a38c 3167 "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
21380931
JB
3168 "%llu total\n", (unsigned long long)bytes,
3169 (unsigned long long)data_sinfo->bytes_delalloc,
3170 (unsigned long long)data_sinfo->bytes_used,
3171 (unsigned long long)data_sinfo->bytes_reserved,
3172 (unsigned long long)data_sinfo->bytes_pinned,
3173 (unsigned long long)data_sinfo->bytes_readonly,
3174 (unsigned long long)data_sinfo->bytes_may_use,
3175 (unsigned long long)data_sinfo->total_bytes);
6a63209f
JB
3176 return -ENOSPC;
3177 }
3178 data_sinfo->bytes_may_use += bytes;
3179 BTRFS_I(inode)->reserved_bytes += bytes;
3180 spin_unlock(&data_sinfo->lock);
3181
9ed74f2d 3182 return 0;
6a63209f
JB
3183}
3184
3185/*
3186 * if there was an error for whatever reason after calling
3187 * btrfs_check_data_free_space, call this so we can clean up the counters.
3188 */
3189void btrfs_free_reserved_data_space(struct btrfs_root *root,
3190 struct inode *inode, u64 bytes)
3191{
3192 struct btrfs_space_info *data_sinfo;
3193
3194 /* make sure bytes are sectorsize aligned */
3195 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3196
3197 data_sinfo = BTRFS_I(inode)->space_info;
3198 spin_lock(&data_sinfo->lock);
3199 data_sinfo->bytes_may_use -= bytes;
3200 BTRFS_I(inode)->reserved_bytes -= bytes;
3201 spin_unlock(&data_sinfo->lock);
3202}
3203
3204/* called when we are adding a delalloc extent to the inode's io_tree */
3205void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
3206 u64 bytes)
3207{
3208 struct btrfs_space_info *data_sinfo;
3209
3210 /* get the space info for where this inode will be storing its data */
3211 data_sinfo = BTRFS_I(inode)->space_info;
3212
3213 /* make sure we have enough space to handle the data first */
3214 spin_lock(&data_sinfo->lock);
3215 data_sinfo->bytes_delalloc += bytes;
3216
3217 /*
3218 * we are adding a delalloc extent without calling
3219 * btrfs_check_data_free_space first. This happens on a weird
3220 * writepage condition, but shouldn't hurt our accounting
3221 */
3222 if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
3223 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
3224 BTRFS_I(inode)->reserved_bytes = 0;
3225 } else {
3226 data_sinfo->bytes_may_use -= bytes;
3227 BTRFS_I(inode)->reserved_bytes -= bytes;
3228 }
3229
3230 spin_unlock(&data_sinfo->lock);
3231}
3232
3233/* called when we are clearing a delalloc extent from the inode's io_tree */
3234void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
3235 u64 bytes)
3236{
3237 struct btrfs_space_info *info;
3238
3239 info = BTRFS_I(inode)->space_info;
3240
3241 spin_lock(&info->lock);
3242 info->bytes_delalloc -= bytes;
3243 spin_unlock(&info->lock);
3244}
3245
97e728d4
JB
3246static void force_metadata_allocation(struct btrfs_fs_info *info)
3247{
3248 struct list_head *head = &info->space_info;
3249 struct btrfs_space_info *found;
3250
3251 rcu_read_lock();
3252 list_for_each_entry_rcu(found, head, list) {
3253 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3254 found->force_alloc = 1;
3255 }
3256 rcu_read_unlock();
3257}
3258
424499db
YZ
3259static int should_alloc_chunk(struct btrfs_space_info *sinfo,
3260 u64 alloc_bytes)
3261{
3262 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3263
3264 if (sinfo->bytes_used + sinfo->bytes_reserved +
3265 alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3266 return 0;
3267
3268 if (sinfo->bytes_used + sinfo->bytes_reserved +
3269 alloc_bytes < div_factor(num_bytes, 8))
3270 return 0;
3271
3272 return 1;
3273}
3274
6324fbf3
CM
3275static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3276 struct btrfs_root *extent_root, u64 alloc_bytes,
0ef3e66b 3277 u64 flags, int force)
6324fbf3
CM
3278{
3279 struct btrfs_space_info *space_info;
97e728d4 3280 struct btrfs_fs_info *fs_info = extent_root->fs_info;
c146afad
YZ
3281 int ret = 0;
3282
97e728d4 3283 mutex_lock(&fs_info->chunk_mutex);
6324fbf3 3284
2b82032c 3285 flags = btrfs_reduce_alloc_profile(extent_root, flags);
ec44a35c 3286
6324fbf3 3287 space_info = __find_space_info(extent_root->fs_info, flags);
593060d7
CM
3288 if (!space_info) {
3289 ret = update_space_info(extent_root->fs_info, flags,
3290 0, 0, &space_info);
3291 BUG_ON(ret);
3292 }
6324fbf3
CM
3293 BUG_ON(!space_info);
3294
25179201 3295 spin_lock(&space_info->lock);
9ed74f2d 3296 if (space_info->force_alloc)
0ef3e66b 3297 force = 1;
25179201
JB
3298 if (space_info->full) {
3299 spin_unlock(&space_info->lock);
925baedd 3300 goto out;
25179201 3301 }
6324fbf3 3302
424499db 3303 if (!force && !should_alloc_chunk(space_info, alloc_bytes)) {
25179201 3304 spin_unlock(&space_info->lock);
925baedd 3305 goto out;
25179201 3306 }
25179201
JB
3307 spin_unlock(&space_info->lock);
3308
97e728d4
JB
3309 /*
3310 * if we're doing a data chunk, go ahead and make sure that
3311 * we keep a reasonable number of metadata chunks allocated in the
3312 * FS as well.
3313 */
9ed74f2d 3314 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
97e728d4
JB
3315 fs_info->data_chunk_allocations++;
3316 if (!(fs_info->data_chunk_allocations %
3317 fs_info->metadata_ratio))
3318 force_metadata_allocation(fs_info);
3319 }
3320
2b82032c 3321 ret = btrfs_alloc_chunk(trans, extent_root, flags);
9ed74f2d 3322 spin_lock(&space_info->lock);
d397712b 3323 if (ret)
6324fbf3 3324 space_info->full = 1;
424499db
YZ
3325 else
3326 ret = 1;
9ed74f2d
JB
3327 space_info->force_alloc = 0;
3328 spin_unlock(&space_info->lock);
a74a4b97 3329out:
c146afad 3330 mutex_unlock(&extent_root->fs_info->chunk_mutex);
0f9dd46c 3331 return ret;
6324fbf3
CM
3332}
3333
424499db
YZ
3334static int maybe_allocate_chunk(struct btrfs_trans_handle *trans,
3335 struct btrfs_root *root,
3336 struct btrfs_space_info *sinfo, u64 num_bytes)
3337{
3338 int ret;
3339 int end_trans = 0;
3340
3341 if (sinfo->full)
3342 return 0;
3343
3344 spin_lock(&sinfo->lock);
3345 ret = should_alloc_chunk(sinfo, num_bytes + 2 * 1024 * 1024);
3346 spin_unlock(&sinfo->lock);
3347 if (!ret)
3348 return 0;
3349
3350 if (!trans) {
3351 trans = btrfs_join_transaction(root, 1);
3352 BUG_ON(IS_ERR(trans));
3353 end_trans = 1;
3354 }
3355
3356 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3357 num_bytes + 2 * 1024 * 1024,
3358 get_alloc_profile(root, sinfo->flags), 0);
3359
3360 if (end_trans)
3361 btrfs_end_transaction(trans, root);
3362
3363 return ret == 1 ? 1 : 0;
3364}
3365
5da9d01b
YZ
3366/*
3367 * shrink metadata reservation for delalloc
3368 */
3369static int shrink_delalloc(struct btrfs_trans_handle *trans,
3370 struct btrfs_root *root,
3371 struct btrfs_space_info *sinfo, u64 to_reclaim)
3372{
3373 u64 reserved;
3374 u64 max_reclaim;
3375 u64 reclaimed = 0;
3376 int pause = 1;
3377 int ret;
3378
3379 spin_lock(&sinfo->lock);
3380 reserved = sinfo->bytes_delalloc;
3381 spin_unlock(&sinfo->lock);
3382
3383 if (reserved == 0)
3384 return 0;
3385
3386 max_reclaim = min(reserved, to_reclaim);
3387
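	/*
	 * Flush delalloc one inode at a time; when nothing gets flushed,
	 * back off exponentially (capped at HZ/10) before re-checking how
	 * much of the reservation has actually been reclaimed.
	 */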
3388 while (1) {
3389 ret = btrfs_start_one_delalloc_inode(root, trans ? 1 : 0);
3390 if (!ret) {
3391 __set_current_state(TASK_INTERRUPTIBLE);
3392 schedule_timeout(pause);
3393 pause <<= 1;
3394 if (pause > HZ / 10)
3395 pause = HZ / 10;
3396 } else {
3397 pause = 1;
3398 }
3399
3400 spin_lock(&sinfo->lock);
3401 if (reserved > sinfo->bytes_delalloc)
3402 reclaimed = reserved - sinfo->bytes_delalloc;
3403 reserved = sinfo->bytes_delalloc;
3404 spin_unlock(&sinfo->lock);
3405
3406 if (reserved == 0 || reclaimed >= max_reclaim)
3407 break;
3408
3409 if (trans && trans->transaction->blocked)
3410 return -EAGAIN;
3411 }
3412 return reclaimed >= to_reclaim;
3413}
3414
9078a3e1
CM
3415static int update_block_group(struct btrfs_trans_handle *trans,
3416 struct btrfs_root *root,
db94535d 3417 u64 bytenr, u64 num_bytes, int alloc,
0b86a832 3418 int mark_free)
9078a3e1
CM
3419{
3420 struct btrfs_block_group_cache *cache;
3421 struct btrfs_fs_info *info = root->fs_info;
b742bb82 3422 int factor;
db94535d 3423 u64 total = num_bytes;
9078a3e1 3424 u64 old_val;
db94535d 3425 u64 byte_in_group;
3e1ad54f 3426
5d4f98a2
YZ
3427 /* block accounting for super block */
3428 spin_lock(&info->delalloc_lock);
3429 old_val = btrfs_super_bytes_used(&info->super_copy);
3430 if (alloc)
3431 old_val += num_bytes;
3432 else
3433 old_val -= num_bytes;
3434 btrfs_set_super_bytes_used(&info->super_copy, old_val);
5d4f98a2
YZ
3435 spin_unlock(&info->delalloc_lock);
3436
d397712b 3437 while (total) {
db94535d 3438 cache = btrfs_lookup_block_group(info, bytenr);
f3465ca4 3439 if (!cache)
9078a3e1 3440 return -1;
b742bb82
YZ
3441 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
3442 BTRFS_BLOCK_GROUP_RAID1 |
3443 BTRFS_BLOCK_GROUP_RAID10))
3444 factor = 2;
3445 else
3446 factor = 1;
db94535d
CM
3447 byte_in_group = bytenr - cache->key.objectid;
3448 WARN_ON(byte_in_group > cache->key.offset);
9078a3e1 3449
25179201 3450 spin_lock(&cache->space_info->lock);
c286ac48 3451 spin_lock(&cache->lock);
0f9dd46c 3452 cache->dirty = 1;
9078a3e1 3453 old_val = btrfs_block_group_used(&cache->item);
db94535d 3454 num_bytes = min(total, cache->key.offset - byte_in_group);
cd1bc465 3455 if (alloc) {
db94535d 3456 old_val += num_bytes;
11833d66
YZ
3457 btrfs_set_block_group_used(&cache->item, old_val);
3458 cache->reserved -= num_bytes;
11833d66 3459 cache->space_info->bytes_reserved -= num_bytes;
b742bb82
YZ
3460 cache->space_info->bytes_used += num_bytes;
3461 cache->space_info->disk_used += num_bytes * factor;
a512bbf8 3462 if (cache->ro)
c146afad 3463 cache->space_info->bytes_readonly -= num_bytes;
c286ac48 3464 spin_unlock(&cache->lock);
25179201 3465 spin_unlock(&cache->space_info->lock);
cd1bc465 3466 } else {
db94535d 3467 old_val -= num_bytes;
b742bb82 3468 btrfs_set_block_group_used(&cache->item, old_val);
6324fbf3 3469 cache->space_info->bytes_used -= num_bytes;
b742bb82 3470 cache->space_info->disk_used -= num_bytes * factor;
c146afad
YZ
3471 if (cache->ro)
3472 cache->space_info->bytes_readonly += num_bytes;
c286ac48 3473 spin_unlock(&cache->lock);
25179201 3474 spin_unlock(&cache->space_info->lock);
f510cfec 3475 if (mark_free) {
0f9dd46c 3476 int ret;
1f3c79a2
LH
3477
3478 ret = btrfs_discard_extent(root, bytenr,
3479 num_bytes);
3480 WARN_ON(ret);
3481
0f9dd46c
JB
3482 ret = btrfs_add_free_space(cache, bytenr,
3483 num_bytes);
d2fb3437 3484 WARN_ON(ret);
e37c9e69 3485 }
cd1bc465 3486 }
fa9c0d79 3487 btrfs_put_block_group(cache);
db94535d
CM
3488 total -= num_bytes;
3489 bytenr += num_bytes;
9078a3e1
CM
3490 }
3491 return 0;
3492}
6324fbf3 3493
a061fc8d
CM
3494static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3495{
0f9dd46c 3496 struct btrfs_block_group_cache *cache;
d2fb3437 3497 u64 bytenr;
0f9dd46c
JB
3498
3499 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3500 if (!cache)
a061fc8d 3501 return 0;
0f9dd46c 3502
d2fb3437 3503 bytenr = cache->key.objectid;
fa9c0d79 3504 btrfs_put_block_group(cache);
d2fb3437
YZ
3505
3506 return bytenr;
a061fc8d
CM
3507}
3508
11833d66
YZ
3509/*
3510 * this function must be called within a transaction
3511 */
3512int btrfs_pin_extent(struct btrfs_root *root,
3513 u64 bytenr, u64 num_bytes, int reserved)
324ae4df 3514{
324ae4df 3515 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66 3516 struct btrfs_block_group_cache *cache;
324ae4df 3517
11833d66
YZ
3518 cache = btrfs_lookup_block_group(fs_info, bytenr);
3519 BUG_ON(!cache);
68b38550 3520
11833d66
YZ
3521 spin_lock(&cache->space_info->lock);
3522 spin_lock(&cache->lock);
3523 cache->pinned += num_bytes;
3524 cache->space_info->bytes_pinned += num_bytes;
3525 if (reserved) {
3526 cache->reserved -= num_bytes;
3527 cache->space_info->bytes_reserved -= num_bytes;
3528 }
3529 spin_unlock(&cache->lock);
3530 spin_unlock(&cache->space_info->lock);
68b38550 3531
11833d66 3532 btrfs_put_block_group(cache);
68b38550 3533
11833d66
YZ
3534 set_extent_dirty(fs_info->pinned_extents,
3535 bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3536 return 0;
3537}
3538
3539static int update_reserved_extents(struct btrfs_block_group_cache *cache,
3540 u64 num_bytes, int reserve)
3541{
3542 spin_lock(&cache->space_info->lock);
3543 spin_lock(&cache->lock);
3544 if (reserve) {
3545 cache->reserved += num_bytes;
3546 cache->space_info->bytes_reserved += num_bytes;
3547 } else {
3548 cache->reserved -= num_bytes;
3549 cache->space_info->bytes_reserved -= num_bytes;
324ae4df 3550 }
11833d66
YZ
3551 spin_unlock(&cache->lock);
3552 spin_unlock(&cache->space_info->lock);
324ae4df
Y
3553 return 0;
3554}
9078a3e1 3555
11833d66
YZ
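/*
 * record how far each caching block group has progressed
 * (last_byte_to_unpin) and switch pinned_extents to the other
 * freed_extents tree, so extents pinned from here on are kept
 * separate from the ones about to be unpinned
 */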
3556int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3557 struct btrfs_root *root)
e8569813 3558{
e8569813 3559 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66
YZ
3560 struct btrfs_caching_control *next;
3561 struct btrfs_caching_control *caching_ctl;
3562 struct btrfs_block_group_cache *cache;
e8569813 3563
11833d66 3564 down_write(&fs_info->extent_commit_sem);
25179201 3565
11833d66
YZ
3566 list_for_each_entry_safe(caching_ctl, next,
3567 &fs_info->caching_block_groups, list) {
3568 cache = caching_ctl->block_group;
3569 if (block_group_cache_done(cache)) {
3570 cache->last_byte_to_unpin = (u64)-1;
3571 list_del_init(&caching_ctl->list);
3572 put_caching_control(caching_ctl);
e8569813 3573 } else {
11833d66 3574 cache->last_byte_to_unpin = caching_ctl->progress;
e8569813 3575 }
e8569813 3576 }
11833d66
YZ
3577
3578 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3579 fs_info->pinned_extents = &fs_info->freed_extents[1];
3580 else
3581 fs_info->pinned_extents = &fs_info->freed_extents[0];
3582
3583 up_write(&fs_info->extent_commit_sem);
e8569813
ZY
3584 return 0;
3585}
3586
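/*
 * return the pinned bytes in [start, end] to the free space caches of
 * the block groups they belong to (up to each group's
 * last_byte_to_unpin) and drop the pinned counters
 */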
11833d66 3587static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
ccd467d6 3588{
11833d66
YZ
3589 struct btrfs_fs_info *fs_info = root->fs_info;
3590 struct btrfs_block_group_cache *cache = NULL;
3591 u64 len;
ccd467d6 3592
11833d66
YZ
3593 while (start <= end) {
3594 if (!cache ||
3595 start >= cache->key.objectid + cache->key.offset) {
3596 if (cache)
3597 btrfs_put_block_group(cache);
3598 cache = btrfs_lookup_block_group(fs_info, start);
3599 BUG_ON(!cache);
3600 }
3601
3602 len = cache->key.objectid + cache->key.offset - start;
3603 len = min(len, end + 1 - start);
3604
3605 if (start < cache->last_byte_to_unpin) {
3606 len = min(len, cache->last_byte_to_unpin - start);
3607 btrfs_add_free_space(cache, start, len);
3608 }
3609
3610 spin_lock(&cache->space_info->lock);
3611 spin_lock(&cache->lock);
3612 cache->pinned -= len;
3613 cache->space_info->bytes_pinned -= len;
3614 spin_unlock(&cache->lock);
3615 spin_unlock(&cache->space_info->lock);
817d52f8 3616
11833d66 3617 start += len;
ccd467d6 3618 }
11833d66
YZ
3619
3620 if (cache)
3621 btrfs_put_block_group(cache);
ccd467d6
CM
3622 return 0;
3623}
3624
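/*
 * walk the freed_extents tree that held the pinned extents of the
 * committing transaction, discarding each range and unpinning it
 */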
3625int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
11833d66 3626 struct btrfs_root *root)
a28ec197 3627{
11833d66
YZ
3628 struct btrfs_fs_info *fs_info = root->fs_info;
3629 struct extent_io_tree *unpin;
1a5bc167
CM
3630 u64 start;
3631 u64 end;
a28ec197 3632 int ret;
a28ec197 3633
11833d66
YZ
3634 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3635 unpin = &fs_info->freed_extents[1];
3636 else
3637 unpin = &fs_info->freed_extents[0];
3638
d397712b 3639 while (1) {
1a5bc167
CM
3640 ret = find_first_extent_bit(unpin, 0, &start, &end,
3641 EXTENT_DIRTY);
3642 if (ret)
a28ec197 3643 break;
1f3c79a2
LH
3644
3645 ret = btrfs_discard_extent(root, start, end + 1 - start);
3646
1a5bc167 3647 clear_extent_dirty(unpin, start, end, GFP_NOFS);
11833d66 3648 unpin_extent_range(root, start, end);
b9473439 3649 cond_resched();
a28ec197 3650 }
817d52f8 3651
1f3c79a2 3652 return ret;
a28ec197
CM
3653}
3654
31840ae1
ZY
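/*
 * decide what to do with a freed extent: a clean, unwritten tree block
 * from the current transaction can be reused right away (handed back
 * via must_clean, return value 1), everything else gets pinned until
 * the transaction commits
 */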
3655static int pin_down_bytes(struct btrfs_trans_handle *trans,
3656 struct btrfs_root *root,
b9473439 3657 struct btrfs_path *path,
11833d66
YZ
3658 u64 bytenr, u64 num_bytes,
3659 int is_data, int reserved,
b9473439 3660 struct extent_buffer **must_clean)
e20d96d6 3661{
1a5bc167 3662 int err = 0;
31840ae1 3663 struct extent_buffer *buf;
8ef97622 3664
31840ae1
ZY
3665 if (is_data)
3666 goto pinit;
3667
444528b3
CM
3668 /*
3669 * discard is sloooow, and so triggering discards on
3670 * individual btree blocks isn't a good plan. Just
3671 * pin everything in discard mode.
3672 */
3673 if (btrfs_test_opt(root, DISCARD))
3674 goto pinit;
3675
31840ae1
ZY
3676 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3677 if (!buf)
3678 goto pinit;
3679
3680 /* we can reuse a block if it hasn't been written
3681 * and it is from this transaction. We can't
3682 * reuse anything from the tree log root because
3683 * it has tiny sub-transactions.
3684 */
3685 if (btrfs_buffer_uptodate(buf, 0) &&
3686 btrfs_try_tree_lock(buf)) {
3687 u64 header_owner = btrfs_header_owner(buf);
3688 u64 header_transid = btrfs_header_generation(buf);
3689 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3690 header_transid == trans->transid &&
3691 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
b9473439 3692 *must_clean = buf;
31840ae1 3693 return 1;
8ef97622 3694 }
31840ae1 3695 btrfs_tree_unlock(buf);
f4b9aa8d 3696 }
31840ae1
ZY
3697 free_extent_buffer(buf);
3698pinit:
11833d66
YZ
3699 if (path)
3700 btrfs_set_path_blocking(path);
b9473439 3701 /* unlocks the pinned mutex */
11833d66 3702 btrfs_pin_extent(root, bytenr, num_bytes, reserved);
31840ae1 3703
be744175 3704 BUG_ON(err < 0);
e20d96d6
CM
3705 return 0;
3706}
3707
5d4f98a2
YZ
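/*
 * drop refs_to_drop references to the extent at bytenr. When the last
 * reference goes away the extent item and its backrefs are removed,
 * the bytes are pinned or marked free, and the block group accounting
 * is updated
 */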
3708static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3709 struct btrfs_root *root,
3710 u64 bytenr, u64 num_bytes, u64 parent,
3711 u64 root_objectid, u64 owner_objectid,
3712 u64 owner_offset, int refs_to_drop,
3713 struct btrfs_delayed_extent_op *extent_op)
a28ec197 3714{
e2fa7227 3715 struct btrfs_key key;
5d4f98a2 3716 struct btrfs_path *path;
1261ec42
CM
3717 struct btrfs_fs_info *info = root->fs_info;
3718 struct btrfs_root *extent_root = info->extent_root;
5f39d397 3719 struct extent_buffer *leaf;
5d4f98a2
YZ
3720 struct btrfs_extent_item *ei;
3721 struct btrfs_extent_inline_ref *iref;
a28ec197 3722 int ret;
5d4f98a2 3723 int is_data;
952fccac
CM
3724 int extent_slot = 0;
3725 int found_extent = 0;
3726 int num_to_del = 1;
5d4f98a2
YZ
3727 u32 item_size;
3728 u64 refs;
037e6390 3729
5caf2a00 3730 path = btrfs_alloc_path();
54aa1f4d
CM
3731 if (!path)
3732 return -ENOMEM;
5f26f772 3733
3c12ac72 3734 path->reada = 1;
b9473439 3735 path->leave_spinning = 1;
5d4f98a2
YZ
3736
3737 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3738 BUG_ON(!is_data && refs_to_drop != 1);
3739
3740 ret = lookup_extent_backref(trans, extent_root, path, &iref,
3741 bytenr, num_bytes, parent,
3742 root_objectid, owner_objectid,
3743 owner_offset);
7bb86316 3744 if (ret == 0) {
952fccac 3745 extent_slot = path->slots[0];
5d4f98a2
YZ
3746 while (extent_slot >= 0) {
3747 btrfs_item_key_to_cpu(path->nodes[0], &key,
952fccac 3748 extent_slot);
5d4f98a2 3749 if (key.objectid != bytenr)
952fccac 3750 break;
5d4f98a2
YZ
3751 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3752 key.offset == num_bytes) {
952fccac
CM
3753 found_extent = 1;
3754 break;
3755 }
3756 if (path->slots[0] - extent_slot > 5)
3757 break;
5d4f98a2 3758 extent_slot--;
952fccac 3759 }
5d4f98a2
YZ
3760#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3761 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3762 if (found_extent && item_size < sizeof(*ei))
3763 found_extent = 0;
3764#endif
31840ae1 3765 if (!found_extent) {
5d4f98a2 3766 BUG_ON(iref);
56bec294 3767 ret = remove_extent_backref(trans, extent_root, path,
5d4f98a2
YZ
3768 NULL, refs_to_drop,
3769 is_data);
31840ae1
ZY
3770 BUG_ON(ret);
3771 btrfs_release_path(extent_root, path);
b9473439 3772 path->leave_spinning = 1;
5d4f98a2
YZ
3773
3774 key.objectid = bytenr;
3775 key.type = BTRFS_EXTENT_ITEM_KEY;
3776 key.offset = num_bytes;
3777
31840ae1
ZY
3778 ret = btrfs_search_slot(trans, extent_root,
3779 &key, path, -1, 1);
f3465ca4
JB
3780 if (ret) {
3781 printk(KERN_ERR "umm, got %d back from search"
d397712b
CM
3782 ", was looking for %llu\n", ret,
3783 (unsigned long long)bytenr);
f3465ca4
JB
3784 btrfs_print_leaf(extent_root, path->nodes[0]);
3785 }
31840ae1
ZY
3786 BUG_ON(ret);
3787 extent_slot = path->slots[0];
3788 }
7bb86316
CM
3789 } else {
3790 btrfs_print_leaf(extent_root, path->nodes[0]);
3791 WARN_ON(1);
d397712b 3792 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5d4f98a2 3793 "parent %llu root %llu owner %llu offset %llu\n",
d397712b 3794 (unsigned long long)bytenr,
56bec294 3795 (unsigned long long)parent,
d397712b 3796 (unsigned long long)root_objectid,
5d4f98a2
YZ
3797 (unsigned long long)owner_objectid,
3798 (unsigned long long)owner_offset);
7bb86316 3799 }
5f39d397
CM
3800
3801 leaf = path->nodes[0];
5d4f98a2
YZ
3802 item_size = btrfs_item_size_nr(leaf, extent_slot);
3803#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3804 if (item_size < sizeof(*ei)) {
3805 BUG_ON(found_extent || extent_slot != path->slots[0]);
3806 ret = convert_extent_item_v0(trans, extent_root, path,
3807 owner_objectid, 0);
3808 BUG_ON(ret < 0);
3809
3810 btrfs_release_path(extent_root, path);
3811 path->leave_spinning = 1;
3812
3813 key.objectid = bytenr;
3814 key.type = BTRFS_EXTENT_ITEM_KEY;
3815 key.offset = num_bytes;
3816
3817 ret = btrfs_search_slot(trans, extent_root, &key, path,
3818 -1, 1);
3819 if (ret) {
3820 printk(KERN_ERR "umm, got %d back from search"
3821 ", was looking for %llu\n", ret,
3822 (unsigned long long)bytenr);
3823 btrfs_print_leaf(extent_root, path->nodes[0]);
3824 }
3825 BUG_ON(ret);
3826 extent_slot = path->slots[0];
3827 leaf = path->nodes[0];
3828 item_size = btrfs_item_size_nr(leaf, extent_slot);
3829 }
3830#endif
3831 BUG_ON(item_size < sizeof(*ei));
952fccac 3832 ei = btrfs_item_ptr(leaf, extent_slot,
123abc88 3833 struct btrfs_extent_item);
5d4f98a2
YZ
3834 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3835 struct btrfs_tree_block_info *bi;
3836 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3837 bi = (struct btrfs_tree_block_info *)(ei + 1);
3838 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3839 }
56bec294 3840
5d4f98a2 3841 refs = btrfs_extent_refs(leaf, ei);
56bec294
CM
3842 BUG_ON(refs < refs_to_drop);
3843 refs -= refs_to_drop;
5f39d397 3844
5d4f98a2
YZ
3845 if (refs > 0) {
3846 if (extent_op)
3847 __run_delayed_extent_op(extent_op, leaf, ei);
3848 /*
3849 * In the case of inline back ref, reference count will
3850 * be updated by remove_extent_backref
952fccac 3851 */
5d4f98a2
YZ
3852 if (iref) {
3853 BUG_ON(!found_extent);
3854 } else {
3855 btrfs_set_extent_refs(leaf, ei, refs);
3856 btrfs_mark_buffer_dirty(leaf);
3857 }
3858 if (found_extent) {
3859 ret = remove_extent_backref(trans, extent_root, path,
3860 iref, refs_to_drop,
3861 is_data);
952fccac
CM
3862 BUG_ON(ret);
3863 }
5d4f98a2
YZ
3864 } else {
3865 int mark_free = 0;
b9473439 3866 struct extent_buffer *must_clean = NULL;
78fae27e 3867
5d4f98a2
YZ
3868 if (found_extent) {
3869 BUG_ON(is_data && refs_to_drop !=
3870 extent_data_ref_count(root, path, iref));
3871 if (iref) {
3872 BUG_ON(path->slots[0] != extent_slot);
3873 } else {
3874 BUG_ON(path->slots[0] != extent_slot + 1);
3875 path->slots[0] = extent_slot;
3876 num_to_del = 2;
3877 }
78fae27e 3878 }
b9473439 3879
5d4f98a2 3880 ret = pin_down_bytes(trans, root, path, bytenr,
11833d66 3881 num_bytes, is_data, 0, &must_clean);
5d4f98a2
YZ
3882 if (ret > 0)
3883 mark_free = 1;
3884 BUG_ON(ret < 0);
b9473439
CM
3885 /*
3886 * it is going to be very rare for someone to be waiting
3887 * on the block we're freeing. del_items might need to
3888 * schedule, so rather than get fancy, just force it
3889 * to blocking here
3890 */
3891 if (must_clean)
3892 btrfs_set_lock_blocking(must_clean);
3893
952fccac
CM
3894 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3895 num_to_del);
31840ae1 3896 BUG_ON(ret);
25179201 3897 btrfs_release_path(extent_root, path);
21af804c 3898
b9473439
CM
3899 if (must_clean) {
3900 clean_tree_block(NULL, root, must_clean);
3901 btrfs_tree_unlock(must_clean);
3902 free_extent_buffer(must_clean);
3903 }
3904
5d4f98a2 3905 if (is_data) {
459931ec
CM
3906 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3907 BUG_ON(ret);
d57e62b8
CM
3908 } else {
3909 invalidate_mapping_pages(info->btree_inode->i_mapping,
3910 bytenr >> PAGE_CACHE_SHIFT,
3911 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
459931ec
CM
3912 }
3913
dcbdd4dc
CM
3914 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3915 mark_free);
3916 BUG_ON(ret);
a28ec197 3917 }
5caf2a00 3918 btrfs_free_path(path);
a28ec197
CM
3919 return ret;
3920}
3921
1887be66
CM
3922/*
3923 * when we free an extent, it is possible (and likely) that we free the last
3924 * delayed ref for that extent as well. This searches the delayed ref tree for
3925 * a given extent, and if there are no other delayed refs to be processed, it
3926 * removes the head from the tree.
3927 */
3928static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3929 struct btrfs_root *root, u64 bytenr)
3930{
3931 struct btrfs_delayed_ref_head *head;
3932 struct btrfs_delayed_ref_root *delayed_refs;
3933 struct btrfs_delayed_ref_node *ref;
3934 struct rb_node *node;
3935 int ret;
3936
3937 delayed_refs = &trans->transaction->delayed_refs;
3938 spin_lock(&delayed_refs->lock);
3939 head = btrfs_find_delayed_ref_head(trans, bytenr);
3940 if (!head)
3941 goto out;
3942
3943 node = rb_prev(&head->node.rb_node);
3944 if (!node)
3945 goto out;
3946
3947 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3948
3949 /* there are still entries for this ref, we can't drop it */
3950 if (ref->bytenr == bytenr)
3951 goto out;
3952
5d4f98a2
YZ
3953 if (head->extent_op) {
3954 if (!head->must_insert_reserved)
3955 goto out;
3956 kfree(head->extent_op);
3957 head->extent_op = NULL;
3958 }
3959
1887be66
CM
3960 /*
3961 * waiting for the lock here would deadlock. If someone else has it
3962 * locked, they are already in the process of dropping it anyway
3963 */
3964 if (!mutex_trylock(&head->mutex))
3965 goto out;
3966
3967 /*
3968 * at this point we have a head with no other entries. Go
3969 * ahead and process it.
3970 */
3971 head->node.in_tree = 0;
3972 rb_erase(&head->node.rb_node, &delayed_refs->root);
c3e69d58 3973
1887be66
CM
3974 delayed_refs->num_entries--;
3975
3976 /*
3977 * we don't take a ref on the node because we're removing it from the
3978 * tree, so we just steal the ref the tree was holding.
3979 */
c3e69d58
CM
3980 delayed_refs->num_heads--;
3981 if (list_empty(&head->cluster))
3982 delayed_refs->num_heads_ready--;
3983
3984 list_del_init(&head->cluster);
1887be66
CM
3985 spin_unlock(&delayed_refs->lock);
3986
3987 ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
5d4f98a2
YZ
3988 &head->node, head->extent_op,
3989 head->must_insert_reserved);
1887be66
CM
3990 BUG_ON(ret);
3991 btrfs_put_delayed_ref(&head->node);
3992 return 0;
3993out:
3994 spin_unlock(&delayed_refs->lock);
3995 return 0;
3996}
3997
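/*
 * free an extent: tree log blocks are pinned directly, everything else
 * is queued as a delayed ref (tree blocks additionally run
 * check_ref_cleanup to cancel out the matching delayed ref head)
 */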
925baedd 3998int btrfs_free_extent(struct btrfs_trans_handle *trans,
31840ae1
ZY
3999 struct btrfs_root *root,
4000 u64 bytenr, u64 num_bytes, u64 parent,
5d4f98a2 4001 u64 root_objectid, u64 owner, u64 offset)
925baedd
CM
4002{
4003 int ret;
4004
56bec294
CM
4005 /*
4006 * tree log blocks never actually go into the extent allocation
4007 * tree, just update pinning info and exit early.
56bec294 4008 */
5d4f98a2
YZ
4009 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4010 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
b9473439 4011 /* unlocks the pinned mutex */
11833d66 4012 btrfs_pin_extent(root, bytenr, num_bytes, 1);
56bec294 4013 ret = 0;
5d4f98a2
YZ
4014 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4015 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4016 parent, root_objectid, (int)owner,
4017 BTRFS_DROP_DELAYED_REF, NULL);
1887be66
CM
4018 BUG_ON(ret);
4019 ret = check_ref_cleanup(trans, root, bytenr);
4020 BUG_ON(ret);
5d4f98a2
YZ
4021 } else {
4022 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4023 parent, root_objectid, owner,
4024 offset, BTRFS_DROP_DELAYED_REF, NULL);
4025 BUG_ON(ret);
56bec294 4026 }
925baedd
CM
4027 return ret;
4028}
4029
86b9f2ec
YZ
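/*
 * subtract blocksize from the root's used-bytes accounting and free
 * the extent through the normal btrfs_free_extent() path
 */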
4030int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4031 struct btrfs_root *root,
4032 u64 bytenr, u32 blocksize,
4033 u64 parent, u64 root_objectid, int level)
4034{
4035 u64 used;
4036 spin_lock(&root->node_lock);
4037 used = btrfs_root_used(&root->root_item) - blocksize;
4038 btrfs_set_root_used(&root->root_item, used);
4039 spin_unlock(&root->node_lock);
4040
4041 return btrfs_free_extent(trans, root, bytenr, blocksize,
4042 parent, root_objectid, level, 0);
4043}
4044
87ee04eb
CM
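/* round val up to a multiple of the stripe size */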
4045static u64 stripe_align(struct btrfs_root *root, u64 val)
4046{
4047 u64 mask = ((u64)root->stripesize - 1);
4048 u64 ret = (val + mask) & ~mask;
4049 return ret;
4050}
4051
817d52f8
JB
4052/*
4053 * when we wait for progress in the block group caching, it's because
4054 * our allocation attempt failed at least once. So, we must sleep
4055 * and let some progress happen before we try again.
4056 *
4057 * This function will sleep at least once waiting for new free space to
4058 * show up, and then it will check the block group free space numbers
4059 * for our min num_bytes. Another option is to have it go ahead
4060 * and look in the rbtree for a free extent of a given size, but this
4061 * is a good start.
4062 */
4063static noinline int
4064wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4065 u64 num_bytes)
4066{
11833d66 4067 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
4068 DEFINE_WAIT(wait);
4069
11833d66
YZ
4070 caching_ctl = get_caching_control(cache);
4071 if (!caching_ctl)
817d52f8 4072 return 0;
817d52f8 4073
11833d66 4074 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
817d52f8 4075 (cache->free_space >= num_bytes));
11833d66
YZ
4076
4077 put_caching_control(caching_ctl);
4078 return 0;
4079}
4080
4081static noinline int
4082wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4083{
4084 struct btrfs_caching_control *caching_ctl;
4085 DEFINE_WAIT(wait);
4086
4087 caching_ctl = get_caching_control(cache);
4088 if (!caching_ctl)
4089 return 0;
4090
4091 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4092
4093 put_caching_control(caching_ctl);
817d52f8
JB
4094 return 0;
4095}
4096
b742bb82
YZ
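/*
 * map a block group's raid profile to its index in
 * space_info->block_groups[]
 */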
4097static int get_block_group_index(struct btrfs_block_group_cache *cache)
4098{
4099 int index;
4100 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4101 index = 0;
4102 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4103 index = 1;
4104 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4105 index = 2;
4106 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4107 index = 3;
4108 else
4109 index = 4;
4110 return index;
4111}
4112
817d52f8 4113enum btrfs_loop_type {
ccf0e725 4114 LOOP_FIND_IDEAL = 0,
817d52f8
JB
4115 LOOP_CACHING_NOWAIT = 1,
4116 LOOP_CACHING_WAIT = 2,
4117 LOOP_ALLOC_CHUNK = 3,
4118 LOOP_NO_EMPTY_SIZE = 4,
4119};
4120
fec577fb
CM
4121/*
4122 * walks the btree of allocated extents and finds a hole of a given size.
4123 * The key ins is changed to record the hole:
4124 * ins->objectid == block start
62e2749e 4125 * ins->flags = BTRFS_EXTENT_ITEM_KEY
fec577fb
CM
4126 * ins->offset == number of blocks
4127 * Any available blocks before search_start are skipped.
4128 */
d397712b 4129static noinline int find_free_extent(struct btrfs_trans_handle *trans,
98ed5174
CM
4130 struct btrfs_root *orig_root,
4131 u64 num_bytes, u64 empty_size,
4132 u64 search_start, u64 search_end,
4133 u64 hint_byte, struct btrfs_key *ins,
4134 u64 exclude_start, u64 exclude_nr,
4135 int data)
fec577fb 4136{
80eb234a 4137 int ret = 0;
d397712b 4138 struct btrfs_root *root = orig_root->fs_info->extent_root;
fa9c0d79 4139 struct btrfs_free_cluster *last_ptr = NULL;
80eb234a 4140 struct btrfs_block_group_cache *block_group = NULL;
239b14b3 4141 int empty_cluster = 2 * 1024 * 1024;
0ef3e66b 4142 int allowed_chunk_alloc = 0;
ccf0e725 4143 int done_chunk_alloc = 0;
80eb234a 4144 struct btrfs_space_info *space_info;
fa9c0d79 4145 int last_ptr_loop = 0;
b742bb82 4146 int index = 0;
fa9c0d79 4147 int loop = 0;
817d52f8 4148 bool found_uncached_bg = false;
0a24325e 4149 bool failed_cluster_refill = false;
1cdda9b8 4150 bool failed_alloc = false;
ccf0e725
JB
4151 u64 ideal_cache_percent = 0;
4152 u64 ideal_cache_offset = 0;
fec577fb 4153
db94535d 4154 WARN_ON(num_bytes < root->sectorsize);
b1a4d965 4155 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
80eb234a
JB
4156 ins->objectid = 0;
4157 ins->offset = 0;
b1a4d965 4158
2552d17e 4159 space_info = __find_space_info(root->fs_info, data);
1b1d1f66
JB
4160 if (!space_info) {
4161 printk(KERN_ERR "No space info for %d\n", data);
4162 return -ENOSPC;
4163 }
2552d17e 4164
0ef3e66b
CM
4165 if (orig_root->ref_cows || empty_size)
4166 allowed_chunk_alloc = 1;
4167
239b14b3 4168 if (data & BTRFS_BLOCK_GROUP_METADATA) {
fa9c0d79 4169 last_ptr = &root->fs_info->meta_alloc_cluster;
536ac8ae
CM
4170 if (!btrfs_test_opt(root, SSD))
4171 empty_cluster = 64 * 1024;
239b14b3
CM
4172 }
4173
fa9c0d79
CM
4174 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4175 last_ptr = &root->fs_info->data_alloc_cluster;
4176 }
0f9dd46c 4177
239b14b3 4178 if (last_ptr) {
fa9c0d79
CM
4179 spin_lock(&last_ptr->lock);
4180 if (last_ptr->block_group)
4181 hint_byte = last_ptr->window_start;
4182 spin_unlock(&last_ptr->lock);
239b14b3 4183 }
fa9c0d79 4184
a061fc8d 4185 search_start = max(search_start, first_logical_byte(root, 0));
239b14b3 4186 search_start = max(search_start, hint_byte);
0b86a832 4187
817d52f8 4188 if (!last_ptr)
fa9c0d79 4189 empty_cluster = 0;
fa9c0d79 4190
2552d17e 4191 if (search_start == hint_byte) {
ccf0e725 4192ideal_cache:
2552d17e
JB
4193 block_group = btrfs_lookup_block_group(root->fs_info,
4194 search_start);
817d52f8
JB
4195 /*
4196 * we don't want to use the block group if it doesn't match our
4197 * allocation bits, or if it's not cached.
ccf0e725
JB
4198 *
4199 * However if we are re-searching with an ideal block group
4200 * picked out then we don't care that the block group is cached.
817d52f8
JB
4201 */
4202 if (block_group && block_group_bits(block_group, data) &&
ccf0e725
JB
4203 (block_group->cached != BTRFS_CACHE_NO ||
4204 search_start == ideal_cache_offset)) {
2552d17e 4205 down_read(&space_info->groups_sem);
44fb5511
CM
4206 if (list_empty(&block_group->list) ||
4207 block_group->ro) {
4208 /*
4209 * someone is removing this block group,
4210 * we can't jump into the have_block_group
4211 * target because our list pointers are not
4212 * valid
4213 */
4214 btrfs_put_block_group(block_group);
4215 up_read(&space_info->groups_sem);
ccf0e725 4216 } else {
b742bb82 4217 index = get_block_group_index(block_group);
44fb5511 4218 goto have_block_group;
ccf0e725 4219 }
2552d17e 4220 } else if (block_group) {
fa9c0d79 4221 btrfs_put_block_group(block_group);
2552d17e 4222 }
42e70e7a 4223 }
2552d17e 4224search:
80eb234a 4225 down_read(&space_info->groups_sem);
b742bb82
YZ
4226 list_for_each_entry(block_group, &space_info->block_groups[index],
4227 list) {
6226cb0a 4228 u64 offset;
817d52f8 4229 int cached;
8a1413a2 4230
11dfe35a 4231 btrfs_get_block_group(block_group);
2552d17e 4232 search_start = block_group->key.objectid;
42e70e7a 4233
2552d17e 4234have_block_group:
817d52f8 4235 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
ccf0e725
JB
4236 u64 free_percent;
4237
4238 free_percent = btrfs_block_group_used(&block_group->item);
4239 free_percent *= 100;
4240 free_percent = div64_u64(free_percent,
4241 block_group->key.offset);
4242 free_percent = 100 - free_percent;
4243 if (free_percent > ideal_cache_percent &&
4244 likely(!block_group->ro)) {
4245 ideal_cache_offset = block_group->key.objectid;
4246 ideal_cache_percent = free_percent;
4247 }
4248
817d52f8 4249 /*
ccf0e725
JB
4250 * We only want to start kthread caching if we are at
4251 * the point where we will wait for caching to make
4252 * progress, or if our ideal search is over and we've
4253 * found somebody to start caching.
817d52f8
JB
4254 */
4255 if (loop > LOOP_CACHING_NOWAIT ||
ccf0e725
JB
4256 (loop > LOOP_FIND_IDEAL &&
4257 atomic_read(&space_info->caching_threads) < 2)) {
817d52f8
JB
4258 ret = cache_block_group(block_group);
4259 BUG_ON(ret);
2552d17e 4260 }
817d52f8
JB
4261 found_uncached_bg = true;
4262
ccf0e725
JB
4263 /*
4264 * If loop is set for cached only, try the next block
4265 * group.
4266 */
4267 if (loop == LOOP_FIND_IDEAL)
817d52f8
JB
4268 goto loop;
4269 }
4270
ccf0e725
JB
4271 cached = block_group_cache_done(block_group);
4272 if (unlikely(!cached))
4273 found_uncached_bg = true;
4274
ea6a478e 4275 if (unlikely(block_group->ro))
2552d17e 4276 goto loop;
0f9dd46c 4277
0a24325e
JB
4278 /*
4279 * Ok we want to try and use the cluster allocator, so let's look
4280 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
4281 * have tried the cluster allocator plenty of times at this
4282 * point and not have found anything, so we are likely way too
4283 * fragmented for the clustering stuff to find anything, so let's
4284 * just skip it and let the allocator find whatever block it can
4285 * find
4286 */
4287 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79
CM
4288 /*
4289 * the refill lock keeps out other
4290 * people trying to start a new cluster
4291 */
4292 spin_lock(&last_ptr->refill_lock);
44fb5511
CM
4293 if (last_ptr->block_group &&
4294 (last_ptr->block_group->ro ||
4295 !block_group_bits(last_ptr->block_group, data))) {
4296 offset = 0;
4297 goto refill_cluster;
4298 }
4299
fa9c0d79
CM
4300 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4301 num_bytes, search_start);
4302 if (offset) {
4303 /* we have a block, we're done */
4304 spin_unlock(&last_ptr->refill_lock);
4305 goto checks;
4306 }
4307
4308 spin_lock(&last_ptr->lock);
4309 /*
4310 * whoops, this cluster doesn't actually point to
4311 * this block group. Get a ref on the block
4312 * group it does point to and try again
4313 */
4314 if (!last_ptr_loop && last_ptr->block_group &&
4315 last_ptr->block_group != block_group) {
4316
4317 btrfs_put_block_group(block_group);
4318 block_group = last_ptr->block_group;
11dfe35a 4319 btrfs_get_block_group(block_group);
fa9c0d79
CM
4320 spin_unlock(&last_ptr->lock);
4321 spin_unlock(&last_ptr->refill_lock);
4322
4323 last_ptr_loop = 1;
4324 search_start = block_group->key.objectid;
44fb5511
CM
4325 /*
4326 * we know this block group is properly
4327 * in the list because
4328 * btrfs_remove_block_group drops the
4329 * cluster before it removes the block
4330 * group from the list
4331 */
fa9c0d79
CM
4332 goto have_block_group;
4333 }
4334 spin_unlock(&last_ptr->lock);
44fb5511 4335refill_cluster:
fa9c0d79
CM
4336 /*
4337 * this cluster didn't work out, free it and
4338 * start over
4339 */
4340 btrfs_return_cluster_to_free_space(NULL, last_ptr);
4341
4342 last_ptr_loop = 0;
4343
4344 /* allocate a cluster in this block group */
451d7585 4345 ret = btrfs_find_space_cluster(trans, root,
fa9c0d79
CM
4346 block_group, last_ptr,
4347 offset, num_bytes,
4348 empty_cluster + empty_size);
4349 if (ret == 0) {
4350 /*
4351 * now pull our allocation out of this
4352 * cluster
4353 */
4354 offset = btrfs_alloc_from_cluster(block_group,
4355 last_ptr, num_bytes,
4356 search_start);
4357 if (offset) {
4358 /* we found one, proceed */
4359 spin_unlock(&last_ptr->refill_lock);
4360 goto checks;
4361 }
0a24325e
JB
4362 } else if (!cached && loop > LOOP_CACHING_NOWAIT
4363 && !failed_cluster_refill) {
817d52f8
JB
4364 spin_unlock(&last_ptr->refill_lock);
4365
0a24325e 4366 failed_cluster_refill = true;
817d52f8
JB
4367 wait_block_group_cache_progress(block_group,
4368 num_bytes + empty_cluster + empty_size);
4369 goto have_block_group;
fa9c0d79 4370 }
817d52f8 4371
fa9c0d79
CM
4372 /*
4373 * at this point we either didn't find a cluster
4374 * or we weren't able to allocate a block from our
4375 * cluster. Free the cluster we've been trying
4376 * to use, and go to the next block group
4377 */
0a24325e 4378 btrfs_return_cluster_to_free_space(NULL, last_ptr);
fa9c0d79 4379 spin_unlock(&last_ptr->refill_lock);
0a24325e 4380 goto loop;
fa9c0d79
CM
4381 }
4382
6226cb0a
JB
4383 offset = btrfs_find_space_for_alloc(block_group, search_start,
4384 num_bytes, empty_size);
1cdda9b8
JB
4385 /*
4386 * If we didn't find a chunk, and we haven't failed on this
4387 * block group before, and this block group is in the middle of
4388 * caching and we are ok with waiting, then go ahead and wait
4389 * for progress to be made, and set failed_alloc to true.
4390 *
4391 * If failed_alloc is true then we've already waited on this
4392 * block group once and should move on to the next block group.
4393 */
4394 if (!offset && !failed_alloc && !cached &&
4395 loop > LOOP_CACHING_NOWAIT) {
817d52f8 4396 wait_block_group_cache_progress(block_group,
1cdda9b8
JB
4397 num_bytes + empty_size);
4398 failed_alloc = true;
817d52f8 4399 goto have_block_group;
1cdda9b8
JB
4400 } else if (!offset) {
4401 goto loop;
817d52f8 4402 }
fa9c0d79 4403checks:
6226cb0a 4404 search_start = stripe_align(root, offset);
2552d17e 4405 /* move on to the next group */
6226cb0a
JB
4406 if (search_start + num_bytes >= search_end) {
4407 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 4408 goto loop;
6226cb0a 4409 }
25179201 4410
2552d17e
JB
4411 /* move on to the next group */
4412 if (search_start + num_bytes >
6226cb0a
JB
4413 block_group->key.objectid + block_group->key.offset) {
4414 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 4415 goto loop;
6226cb0a 4416 }
f5a31e16 4417
2552d17e
JB
4418 if (exclude_nr > 0 &&
4419 (search_start + num_bytes > exclude_start &&
4420 search_start < exclude_start + exclude_nr)) {
4421 search_start = exclude_start + exclude_nr;
4422
6226cb0a 4423 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e
JB
4424 /*
4425 * if search_start is still in this block group
4426 * then we just re-search this block group
f5a31e16 4427 */
2552d17e
JB
4428 if (search_start >= block_group->key.objectid &&
4429 search_start < (block_group->key.objectid +
6226cb0a 4430 block_group->key.offset))
2552d17e 4431 goto have_block_group;
2552d17e 4432 goto loop;
0f9dd46c 4433 }
0b86a832 4434
2552d17e
JB
4435 ins->objectid = search_start;
4436 ins->offset = num_bytes;
d2fb3437 4437
6226cb0a
JB
4438 if (offset < search_start)
4439 btrfs_add_free_space(block_group, offset,
4440 search_start - offset);
4441 BUG_ON(offset > search_start);
4442
11833d66
YZ
4443 update_reserved_extents(block_group, num_bytes, 1);
4444
2552d17e 4445 /* we are all good, lets return */
2552d17e
JB
4446 break;
4447loop:
0a24325e 4448 failed_cluster_refill = false;
1cdda9b8 4449 failed_alloc = false;
b742bb82 4450 BUG_ON(index != get_block_group_index(block_group));
fa9c0d79 4451 btrfs_put_block_group(block_group);
2552d17e
JB
4452 }
4453 up_read(&space_info->groups_sem);
4454
b742bb82
YZ
4455 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
4456 goto search;
4457
ccf0e725
JB
4458 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
4459 * for them to make caching progress. Also
4460 * determine the best possible bg to cache
4461 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4462 * caching kthreads as we move along
817d52f8
JB
4463 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4464 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4465 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4466 * again
fa9c0d79 4467 */
817d52f8
JB
4468 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4469 (found_uncached_bg || empty_size || empty_cluster ||
4470 allowed_chunk_alloc)) {
b742bb82 4471 index = 0;
ccf0e725 4472 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
817d52f8 4473 found_uncached_bg = false;
ccf0e725
JB
4474 loop++;
4475 if (!ideal_cache_percent &&
4476 atomic_read(&space_info->caching_threads))
817d52f8 4477 goto search;
ccf0e725
JB
4478
4479 /*
4480 * 1 of the following 2 things has happened so far
4481 *
4482 * 1) We found an ideal block group for caching that
4483 * is mostly full and will cache quickly, so we might
4484 * as well wait for it.
4485 *
4486 * 2) We searched for cached only and we didn't find
4487 * anything, and we didn't start any caching kthreads
4488 * either, so chances are we will loop through and
4489 * start a couple caching kthreads, and then come back
4490 * around and just wait for them. This will be slower
4491 * because we will have 2 caching kthreads reading at
4492 * the same time when we could have just started one
4493 * and waited for it to get far enough to give us an
4494 * allocation, so go ahead and go to the wait caching
4495 * loop.
4496 */
4497 loop = LOOP_CACHING_WAIT;
4498 search_start = ideal_cache_offset;
4499 ideal_cache_percent = 0;
4500 goto ideal_cache;
4501 } else if (loop == LOOP_FIND_IDEAL) {
4502 /*
4503 * Didn't find an uncached bg, wait on anything we find
4504 * next.
4505 */
4506 loop = LOOP_CACHING_WAIT;
4507 goto search;
4508 }
4509
4510 if (loop < LOOP_CACHING_WAIT) {
4511 loop++;
4512 goto search;
817d52f8
JB
4513 }
4514
4515 if (loop == LOOP_ALLOC_CHUNK) {
fa9c0d79
CM
4516 empty_size = 0;
4517 empty_cluster = 0;
4518 }
2552d17e
JB
4519
4520 if (allowed_chunk_alloc) {
4521 ret = do_chunk_alloc(trans, root, num_bytes +
4522 2 * 1024 * 1024, data, 1);
2552d17e 4523 allowed_chunk_alloc = 0;
ccf0e725
JB
4524 done_chunk_alloc = 1;
4525 } else if (!done_chunk_alloc) {
2552d17e
JB
4526 space_info->force_alloc = 1;
4527 }
4528
817d52f8 4529 if (loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79 4530 loop++;
2552d17e 4531 goto search;
fa9c0d79 4532 }
2552d17e
JB
4533 ret = -ENOSPC;
4534 } else if (!ins->objectid) {
4535 ret = -ENOSPC;
f2654de4 4536 }
0b86a832 4537
80eb234a
JB
4538 /* we found what we needed */
4539 if (ins->objectid) {
4540 if (!(data & BTRFS_BLOCK_GROUP_DATA))
d2fb3437 4541 trans->block_group = block_group->key.objectid;
0f9dd46c 4542
fa9c0d79 4543 btrfs_put_block_group(block_group);
80eb234a 4544 ret = 0;
be744175 4545 }
be744175 4546
0f70abe2 4547 return ret;
fec577fb 4548}
ec44a35c 4549
9ed74f2d
JB
4550static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4551 int dump_block_groups)
0f9dd46c
JB
4552{
4553 struct btrfs_block_group_cache *cache;
b742bb82 4554 int index = 0;
0f9dd46c 4555
9ed74f2d 4556 spin_lock(&info->lock);
d397712b
CM
4557 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4558 (unsigned long long)(info->total_bytes - info->bytes_used -
9ed74f2d
JB
4559 info->bytes_pinned - info->bytes_reserved -
4560 info->bytes_super),
d397712b 4561 (info->full) ? "" : "not ");
6a63209f 4562 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
9ed74f2d
JB
4563 " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4564 "\n",
21380931
JB
4565 (unsigned long long)info->total_bytes,
4566 (unsigned long long)info->bytes_pinned,
4567 (unsigned long long)info->bytes_delalloc,
4568 (unsigned long long)info->bytes_may_use,
9ed74f2d
JB
4569 (unsigned long long)info->bytes_used,
4570 (unsigned long long)info->bytes_root,
4571 (unsigned long long)info->bytes_super,
4572 (unsigned long long)info->bytes_reserved);
4573 spin_unlock(&info->lock);
4574
4575 if (!dump_block_groups)
4576 return;
0f9dd46c 4577
80eb234a 4578 down_read(&info->groups_sem);
b742bb82
YZ
4579again:
4580 list_for_each_entry(cache, &info->block_groups[index], list) {
0f9dd46c 4581 spin_lock(&cache->lock);
d397712b
CM
4582 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4583 "%llu pinned %llu reserved\n",
4584 (unsigned long long)cache->key.objectid,
4585 (unsigned long long)cache->key.offset,
4586 (unsigned long long)btrfs_block_group_used(&cache->item),
4587 (unsigned long long)cache->pinned,
4588 (unsigned long long)cache->reserved);
0f9dd46c
JB
4589 btrfs_dump_free_space(cache, bytes);
4590 spin_unlock(&cache->lock);
4591 }
b742bb82
YZ
4592 if (++index < BTRFS_NR_RAID_TYPES)
4593 goto again;
80eb234a 4594 up_read(&info->groups_sem);
0f9dd46c 4595}
e8569813 4596
11833d66
YZ
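/*
 * reserve an extent of num_bytes. On ENOSPC the request is halved
 * (down to min_alloc_size) and a chunk allocation is forced before
 * retrying
 */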
4597int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4598 struct btrfs_root *root,
4599 u64 num_bytes, u64 min_alloc_size,
4600 u64 empty_size, u64 hint_byte,
4601 u64 search_end, struct btrfs_key *ins,
4602 u64 data)
fec577fb
CM
4603{
4604 int ret;
fbdc762b 4605 u64 search_start = 0;
925baedd 4606
6a63209f 4607 data = btrfs_get_alloc_profile(root, data);
98d20f67 4608again:
0ef3e66b
CM
4609 /*
4610 * the only place that sets empty_size is btrfs_realloc_node, which
4611 * is not called recursively on allocations
4612 */
83d3c969 4613 if (empty_size || root->ref_cows)
6324fbf3 4614 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
0ef3e66b 4615 num_bytes + 2 * 1024 * 1024, data, 0);
0b86a832 4616
db94535d
CM
4617 WARN_ON(num_bytes < root->sectorsize);
4618 ret = find_free_extent(trans, root, num_bytes, empty_size,
4619 search_start, search_end, hint_byte, ins,
26b8003f
CM
4620 trans->alloc_exclude_start,
4621 trans->alloc_exclude_nr, data);
3b951516 4622
98d20f67
CM
4623 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4624 num_bytes = num_bytes >> 1;
0f9dd46c 4625 num_bytes = num_bytes & ~(root->sectorsize - 1);
98d20f67 4626 num_bytes = max(num_bytes, min_alloc_size);
0ef3e66b
CM
4627 do_chunk_alloc(trans, root->fs_info->extent_root,
4628 num_bytes, data, 1);
98d20f67
CM
4629 goto again;
4630 }
817d52f8 4631 if (ret == -ENOSPC) {
0f9dd46c
JB
4632 struct btrfs_space_info *sinfo;
4633
4634 sinfo = __find_space_info(root->fs_info, data);
d397712b
CM
4635 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4636 "wanted %llu\n", (unsigned long long)data,
4637 (unsigned long long)num_bytes);
9ed74f2d 4638 dump_space_info(sinfo, num_bytes, 1);
925baedd 4639 }
0f9dd46c
JB
4640
4641 return ret;
e6dcd2dc
CM
4642}
4643
65b51a00
CM
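/*
 * undo a reservation: discard the range, return it to the block
 * group's free space cache and clear the reserved counters
 */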
4644int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4645{
0f9dd46c 4646 struct btrfs_block_group_cache *cache;
1f3c79a2 4647 int ret = 0;
0f9dd46c 4648
0f9dd46c
JB
4649 cache = btrfs_lookup_block_group(root->fs_info, start);
4650 if (!cache) {
d397712b
CM
4651 printk(KERN_ERR "Unable to find block group for %llu\n",
4652 (unsigned long long)start);
0f9dd46c
JB
4653 return -ENOSPC;
4654 }
1f3c79a2
LH
4655
4656 ret = btrfs_discard_extent(root, start, len);
4657
0f9dd46c 4658 btrfs_add_free_space(cache, start, len);
11833d66 4659 update_reserved_extents(cache, len, 0);
fa9c0d79 4660 btrfs_put_block_group(cache);
817d52f8 4661
e6dcd2dc
CM
4662 return ret;
4663}
4664
5d4f98a2
YZ
4665static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4666 struct btrfs_root *root,
4667 u64 parent, u64 root_objectid,
4668 u64 flags, u64 owner, u64 offset,
4669 struct btrfs_key *ins, int ref_mod)
e6dcd2dc
CM
4670{
4671 int ret;
5d4f98a2 4672 struct btrfs_fs_info *fs_info = root->fs_info;
e6dcd2dc 4673 struct btrfs_extent_item *extent_item;
5d4f98a2 4674 struct btrfs_extent_inline_ref *iref;
e6dcd2dc 4675 struct btrfs_path *path;
5d4f98a2
YZ
4676 struct extent_buffer *leaf;
4677 int type;
4678 u32 size;
26b8003f 4679
5d4f98a2
YZ
4680 if (parent > 0)
4681 type = BTRFS_SHARED_DATA_REF_KEY;
4682 else
4683 type = BTRFS_EXTENT_DATA_REF_KEY;
58176a96 4684
5d4f98a2 4685 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7bb86316
CM
4686
4687 path = btrfs_alloc_path();
4688 BUG_ON(!path);
47e4bb98 4689
b9473439 4690 path->leave_spinning = 1;
5d4f98a2
YZ
4691 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4692 ins, size);
ccd467d6 4693 BUG_ON(ret);
0f9dd46c 4694
5d4f98a2
YZ
4695 leaf = path->nodes[0];
4696 extent_item = btrfs_item_ptr(leaf, path->slots[0],
47e4bb98 4697 struct btrfs_extent_item);
5d4f98a2
YZ
4698 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4699 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4700 btrfs_set_extent_flags(leaf, extent_item,
4701 flags | BTRFS_EXTENT_FLAG_DATA);
4702
4703 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4704 btrfs_set_extent_inline_ref_type(leaf, iref, type);
4705 if (parent > 0) {
4706 struct btrfs_shared_data_ref *ref;
4707 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4708 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4709 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4710 } else {
4711 struct btrfs_extent_data_ref *ref;
4712 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4713 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4714 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4715 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4716 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4717 }
47e4bb98
CM
4718
4719 btrfs_mark_buffer_dirty(path->nodes[0]);
7bb86316 4720 btrfs_free_path(path);
f510cfec 4721
5d4f98a2
YZ
4722 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4723 1, 0);
f5947066 4724 if (ret) {
d397712b
CM
4725 printk(KERN_ERR "btrfs update block group failed for %llu "
4726 "%llu\n", (unsigned long long)ins->objectid,
4727 (unsigned long long)ins->offset);
f5947066
CM
4728 BUG();
4729 }
e6dcd2dc
CM
4730 return ret;
4731}
4732
5d4f98a2
YZ
4733static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4734 struct btrfs_root *root,
4735 u64 parent, u64 root_objectid,
4736 u64 flags, struct btrfs_disk_key *key,
4737 int level, struct btrfs_key *ins)
e6dcd2dc
CM
4738{
4739 int ret;
5d4f98a2
YZ
4740 struct btrfs_fs_info *fs_info = root->fs_info;
4741 struct btrfs_extent_item *extent_item;
4742 struct btrfs_tree_block_info *block_info;
4743 struct btrfs_extent_inline_ref *iref;
4744 struct btrfs_path *path;
4745 struct extent_buffer *leaf;
4746 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
1c2308f8 4747
5d4f98a2
YZ
4748 path = btrfs_alloc_path();
4749 BUG_ON(!path);
56bec294 4750
5d4f98a2
YZ
4751 path->leave_spinning = 1;
4752 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4753 ins, size);
56bec294 4754 BUG_ON(ret);
5d4f98a2
YZ
4755
4756 leaf = path->nodes[0];
4757 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4758 struct btrfs_extent_item);
4759 btrfs_set_extent_refs(leaf, extent_item, 1);
4760 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4761 btrfs_set_extent_flags(leaf, extent_item,
4762 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4763 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4764
4765 btrfs_set_tree_block_key(leaf, block_info, key);
4766 btrfs_set_tree_block_level(leaf, block_info, level);
4767
4768 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4769 if (parent > 0) {
4770 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4771 btrfs_set_extent_inline_ref_type(leaf, iref,
4772 BTRFS_SHARED_BLOCK_REF_KEY);
4773 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4774 } else {
4775 btrfs_set_extent_inline_ref_type(leaf, iref,
4776 BTRFS_TREE_BLOCK_REF_KEY);
4777 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4778 }
4779
4780 btrfs_mark_buffer_dirty(leaf);
4781 btrfs_free_path(path);
4782
4783 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4784 1, 0);
4785 if (ret) {
4786 printk(KERN_ERR "btrfs update block group failed for %llu "
4787 "%llu\n", (unsigned long long)ins->objectid,
4788 (unsigned long long)ins->offset);
4789 BUG();
4790 }
4791 return ret;
4792}
4793
4794int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4795 struct btrfs_root *root,
4796 u64 root_objectid, u64 owner,
4797 u64 offset, struct btrfs_key *ins)
4798{
4799 int ret;
4800
4801 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4802
4803 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4804 0, root_objectid, owner, offset,
4805 BTRFS_ADD_DELAYED_EXTENT, NULL);
e6dcd2dc
CM
4806 return ret;
4807}
e02119d5
CM
4808
4809/*
4810 * this is used by the tree logging recovery code. It records that
4811 * an extent has been allocated and makes sure to clear the free
4812 * space cache bits as well
4813 */
5d4f98a2
YZ
4814int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4815 struct btrfs_root *root,
4816 u64 root_objectid, u64 owner, u64 offset,
4817 struct btrfs_key *ins)
e02119d5
CM
4818{
4819 int ret;
4820 struct btrfs_block_group_cache *block_group;
11833d66
YZ
4821 struct btrfs_caching_control *caching_ctl;
4822 u64 start = ins->objectid;
4823 u64 num_bytes = ins->offset;
e02119d5 4824
e02119d5 4825 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
817d52f8 4826 cache_block_group(block_group);
11833d66 4827 caching_ctl = get_caching_control(block_group);
e02119d5 4828
11833d66
YZ
4829 if (!caching_ctl) {
4830 BUG_ON(!block_group_cache_done(block_group));
4831 ret = btrfs_remove_free_space(block_group, start, num_bytes);
4832 BUG_ON(ret);
4833 } else {
4834 mutex_lock(&caching_ctl->mutex);
4835
4836 if (start >= caching_ctl->progress) {
4837 ret = add_excluded_extent(root, start, num_bytes);
4838 BUG_ON(ret);
4839 } else if (start + num_bytes <= caching_ctl->progress) {
4840 ret = btrfs_remove_free_space(block_group,
4841 start, num_bytes);
4842 BUG_ON(ret);
4843 } else {
4844 num_bytes = caching_ctl->progress - start;
4845 ret = btrfs_remove_free_space(block_group,
4846 start, num_bytes);
4847 BUG_ON(ret);
4848
4849 start = caching_ctl->progress;
4850 num_bytes = ins->objectid + ins->offset -
4851 caching_ctl->progress;
4852 ret = add_excluded_extent(root, start, num_bytes);
4853 BUG_ON(ret);
4854 }
4855
4856 mutex_unlock(&caching_ctl->mutex);
4857 put_caching_control(caching_ctl);
4858 }
4859
4860 update_reserved_extents(block_group, ins->offset, 1);
fa9c0d79 4861 btrfs_put_block_group(block_group);
5d4f98a2
YZ
4862 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4863 0, owner, offset, ins, 1);
e02119d5
CM
4864 return ret;
4865}
4866
e6dcd2dc
CM
4867/*
4868 * finds a free extent and does all the dirty work required for allocation.
4869 * It returns the key for the extent through ins, and a tree buffer for
4870 * the first block of the extent through buf.
4871 *
4872 * returns 0 if everything worked, non-zero otherwise.
4873 */
5d4f98a2
YZ
4874static int alloc_tree_block(struct btrfs_trans_handle *trans,
4875 struct btrfs_root *root,
4876 u64 num_bytes, u64 parent, u64 root_objectid,
4877 struct btrfs_disk_key *key, int level,
4878 u64 empty_size, u64 hint_byte, u64 search_end,
4879 struct btrfs_key *ins)
e6dcd2dc
CM
4880{
4881 int ret;
5d4f98a2
YZ
4882 u64 flags = 0;
4883
11833d66
YZ
4884 ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4885 empty_size, hint_byte, search_end,
4886 ins, 0);
817d52f8
JB
4887 if (ret)
4888 return ret;
5d4f98a2
YZ
4889
4890 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4891 if (parent == 0)
4892 parent = ins->objectid;
4893 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4894 } else
4895 BUG_ON(parent > 0);
4896
d00aff00 4897 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5d4f98a2
YZ
4898 struct btrfs_delayed_extent_op *extent_op;
4899 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4900 BUG_ON(!extent_op);
4901 if (key)
4902 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4903 else
4904 memset(&extent_op->key, 0, sizeof(extent_op->key));
4905 extent_op->flags_to_set = flags;
4906 extent_op->update_key = 1;
4907 extent_op->update_flags = 1;
4908 extent_op->is_data = 0;
4909
4910 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4911 ins->offset, parent, root_objectid,
4912 level, BTRFS_ADD_DELAYED_EXTENT,
4913 extent_op);
d00aff00 4914 BUG_ON(ret);
d00aff00 4915 }
86b9f2ec
YZ
4916
4917 if (root_objectid == root->root_key.objectid) {
4918 u64 used;
4919 spin_lock(&root->node_lock);
4920 used = btrfs_root_used(&root->root_item) + num_bytes;
4921 btrfs_set_root_used(&root->root_item, used);
4922 spin_unlock(&root->node_lock);
4923 }
925baedd 4924 return ret;
fec577fb 4925}
65b51a00
CM
4926
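/*
 * set up the extent_buffer for a newly allocated tree block: lock it,
 * mark it uptodate and record it as dirty in the right extent io tree
 * (log tree blocks use root->dirty_log_pages)
 */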
4927struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4928 struct btrfs_root *root,
4008c04a
CM
4929 u64 bytenr, u32 blocksize,
4930 int level)
65b51a00
CM
4931{
4932 struct extent_buffer *buf;
4933
4934 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4935 if (!buf)
4936 return ERR_PTR(-ENOMEM);
4937 btrfs_set_header_generation(buf, trans->transid);
4008c04a 4938 btrfs_set_buffer_lockdep_class(buf, level);
65b51a00
CM
4939 btrfs_tree_lock(buf);
4940 clean_tree_block(trans, root, buf);
b4ce94de
CM
4941
4942 btrfs_set_lock_blocking(buf);
65b51a00 4943 btrfs_set_buffer_uptodate(buf);
b4ce94de 4944
d0c803c4 4945 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8cef4e16
YZ
4946 /*
4947 * we allow two log transactions at a time, use different
4948 * EXTENT bits to differentiate dirty pages.
4949 */
4950 if (root->log_transid % 2 == 0)
4951 set_extent_dirty(&root->dirty_log_pages, buf->start,
4952 buf->start + buf->len - 1, GFP_NOFS);
4953 else
4954 set_extent_new(&root->dirty_log_pages, buf->start,
4955 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4
CM
4956 } else {
4957 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
65b51a00 4958 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4 4959 }
65b51a00 4960 trans->blocks_used++;
b4ce94de 4961 /* this returns a buffer locked for blocking */
65b51a00
CM
4962 return buf;
4963}
4964
fec577fb
CM
4965/*
4966 * helper function to allocate a block for a given tree
4967 * returns the tree buffer or NULL.
4968 */
5f39d397 4969struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5d4f98a2
YZ
4970 struct btrfs_root *root, u32 blocksize,
4971 u64 parent, u64 root_objectid,
4972 struct btrfs_disk_key *key, int level,
4973 u64 hint, u64 empty_size)
fec577fb 4974{
e2fa7227 4975 struct btrfs_key ins;
fec577fb 4976 int ret;
5f39d397 4977 struct extent_buffer *buf;
fec577fb 4978
5d4f98a2
YZ
4979 ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4980 key, level, empty_size, hint, (u64)-1, &ins);
fec577fb 4981 if (ret) {
54aa1f4d
CM
4982 BUG_ON(ret > 0);
4983 return ERR_PTR(ret);
fec577fb 4984 }
55c69072 4985
4008c04a
CM
4986 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4987 blocksize, level);
fec577fb
CM
4988 return buf;
4989}
a28ec197 4990
2c47e605
YZ
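/*
 * state carried between walk_down/walk_up while dropping or updating a
 * subtree: per-level ref counts and flags, the current stage and
 * readahead bookkeeping
 */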
4991struct walk_control {
4992 u64 refs[BTRFS_MAX_LEVEL];
4993 u64 flags[BTRFS_MAX_LEVEL];
4994 struct btrfs_key update_progress;
4995 int stage;
4996 int level;
4997 int shared_level;
4998 int update_ref;
4999 int keep_locks;
1c4850e2
YZ
5000 int reada_slot;
5001 int reada_count;
2c47e605
YZ
5002};
5003
5004#define DROP_REFERENCE 1
5005#define UPDATE_BACKREF 2
5006
1c4850e2
YZ
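/*
 * readahead for the child blocks of path->nodes[wc->level] that the
 * walk is about to visit; wc->reada_count grows or shrinks depending
 * on how far the walk got past the last readahead
 */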
5007static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5008 struct btrfs_root *root,
5009 struct walk_control *wc,
5010 struct btrfs_path *path)
6407bf6d 5011{
1c4850e2
YZ
5012 u64 bytenr;
5013 u64 generation;
5014 u64 refs;
94fcca9f 5015 u64 flags;
1c4850e2 5016 u64 last = 0;
5d4f98a2 5017 u32 nritems;
1c4850e2
YZ
5018 u32 blocksize;
5019 struct btrfs_key key;
5020 struct extent_buffer *eb;
6407bf6d 5021 int ret;
1c4850e2
YZ
5022 int slot;
5023 int nread = 0;
6407bf6d 5024
1c4850e2
YZ
5025 if (path->slots[wc->level] < wc->reada_slot) {
5026 wc->reada_count = wc->reada_count * 2 / 3;
5027 wc->reada_count = max(wc->reada_count, 2);
5028 } else {
5029 wc->reada_count = wc->reada_count * 3 / 2;
5030 wc->reada_count = min_t(int, wc->reada_count,
5031 BTRFS_NODEPTRS_PER_BLOCK(root));
5032 }
7bb86316 5033
1c4850e2
YZ
5034 eb = path->nodes[wc->level];
5035 nritems = btrfs_header_nritems(eb);
5036 blocksize = btrfs_level_size(root, wc->level - 1);
bd56b302 5037
1c4850e2
YZ
5038 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5039 if (nread >= wc->reada_count)
5040 break;
bd56b302 5041
2dd3e67b 5042 cond_resched();
1c4850e2
YZ
5043 bytenr = btrfs_node_blockptr(eb, slot);
5044 generation = btrfs_node_ptr_generation(eb, slot);
2dd3e67b 5045
1c4850e2
YZ
5046 if (slot == path->slots[wc->level])
5047 goto reada;
5d4f98a2 5048
1c4850e2
YZ
5049 if (wc->stage == UPDATE_BACKREF &&
5050 generation <= root->root_key.offset)
bd56b302
CM
5051 continue;
5052
94fcca9f
YZ
5053 /* We don't lock the tree block, it's OK to be racy here */
5054 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5055 &refs, &flags);
5056 BUG_ON(ret);
5057 BUG_ON(refs == 0);
5058
1c4850e2 5059 if (wc->stage == DROP_REFERENCE) {
1c4850e2
YZ
5060 if (refs == 1)
5061 goto reada;
bd56b302 5062
94fcca9f
YZ
5063 if (wc->level == 1 &&
5064 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5065 continue;
1c4850e2
YZ
5066 if (!wc->update_ref ||
5067 generation <= root->root_key.offset)
5068 continue;
5069 btrfs_node_key_to_cpu(eb, &key, slot);
5070 ret = btrfs_comp_cpu_keys(&key,
5071 &wc->update_progress);
5072 if (ret < 0)
5073 continue;
94fcca9f
YZ
5074 } else {
5075 if (wc->level == 1 &&
5076 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5077 continue;
6407bf6d 5078 }
1c4850e2
YZ
5079reada:
5080 ret = readahead_tree_block(root, bytenr, blocksize,
5081 generation);
5082 if (ret)
bd56b302 5083 break;
1c4850e2
YZ
5084 last = bytenr + blocksize;
5085 nread++;
20524f02 5086 }
1c4850e2 5087 wc->reada_slot = slot;
20524f02 5088}
2c47e605 5089
f82d02d9 5090/*
2c47e605
YZ
5091 * helper to process a tree block while walking down the tree.
5092 *
2c47e605
YZ
5093 * when wc->stage == UPDATE_BACKREF, this function updates
5094 * back refs for pointers in the block.
5095 *
5096 * NOTE: return value 1 means we should stop walking down.
f82d02d9 5097 */
2c47e605 5098static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5d4f98a2 5099 struct btrfs_root *root,
2c47e605 5100 struct btrfs_path *path,
94fcca9f 5101 struct walk_control *wc, int lookup_info)
f82d02d9 5102{
2c47e605
YZ
5103 int level = wc->level;
5104 struct extent_buffer *eb = path->nodes[level];
2c47e605 5105 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
f82d02d9
YZ
5106 int ret;
5107
2c47e605
YZ
5108 if (wc->stage == UPDATE_BACKREF &&
5109 btrfs_header_owner(eb) != root->root_key.objectid)
5110 return 1;
f82d02d9 5111
2c47e605
YZ
5112 /*
5113 * when reference count of tree block is 1, it won't increase
5114 * again. once full backref flag is set, we never clear it.
5115 */
94fcca9f
YZ
5116 if (lookup_info &&
5117 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5118 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
2c47e605
YZ
5119 BUG_ON(!path->locks[level]);
5120 ret = btrfs_lookup_extent_info(trans, root,
5121 eb->start, eb->len,
5122 &wc->refs[level],
5123 &wc->flags[level]);
5124 BUG_ON(ret);
5125 BUG_ON(wc->refs[level] == 0);
5126 }
5d4f98a2 5127
2c47e605
YZ
5128 if (wc->stage == DROP_REFERENCE) {
5129 if (wc->refs[level] > 1)
5130 return 1;
f82d02d9 5131
2c47e605
YZ
5132 if (path->locks[level] && !wc->keep_locks) {
5133 btrfs_tree_unlock(eb);
5134 path->locks[level] = 0;
5135 }
5136 return 0;
5137 }
f82d02d9 5138
2c47e605
YZ
5139 /* wc->stage == UPDATE_BACKREF */
5140 if (!(wc->flags[level] & flag)) {
5141 BUG_ON(!path->locks[level]);
5142 ret = btrfs_inc_ref(trans, root, eb, 1);
f82d02d9 5143 BUG_ON(ret);
2c47e605
YZ
5144 ret = btrfs_dec_ref(trans, root, eb, 0);
5145 BUG_ON(ret);
5146 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5147 eb->len, flag, 0);
5148 BUG_ON(ret);
5149 wc->flags[level] |= flag;
5150 }
5151
5152 /*
5153 * the block is shared by multiple trees, so it's not good to
5154 * keep the tree lock
5155 */
5156 if (path->locks[level] && level > 0) {
5157 btrfs_tree_unlock(eb);
5158 path->locks[level] = 0;
5159 }
5160 return 0;
5161}
5162
1c4850e2
YZ
5163/*
5164 * helper to process a tree block pointer.
5165 *
5166 * when wc->stage == DROP_REFERENCE, this function checks
5167 * reference count of the block pointed to. if the block
5168 * is shared and we need to update back refs for the subtree
5169 * rooted at the block, this function changes wc->stage to
5170 * UPDATE_BACKREF. if the block is shared and there is no
5171 * need to update back refs, this function drops the reference
5172 * to the block.
5173 *
5174 * NOTE: return value 1 means we should stop walking down.
5175 */
5176static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5177 struct btrfs_root *root,
5178 struct btrfs_path *path,
94fcca9f 5179 struct walk_control *wc, int *lookup_info)
1c4850e2
YZ
5180{
5181 u64 bytenr;
5182 u64 generation;
5183 u64 parent;
5184 u32 blocksize;
5185 struct btrfs_key key;
5186 struct extent_buffer *next;
5187 int level = wc->level;
5188 int reada = 0;
5189 int ret = 0;
5190
5191 generation = btrfs_node_ptr_generation(path->nodes[level],
5192 path->slots[level]);
5193 /*
5194 * if the lower level block was created before the snapshot
5195 * was created, we know there is no need to update back refs
5196 * for the subtree
5197 */
5198 if (wc->stage == UPDATE_BACKREF &&
94fcca9f
YZ
5199 generation <= root->root_key.offset) {
5200 *lookup_info = 1;
1c4850e2 5201 return 1;
94fcca9f 5202 }
1c4850e2
YZ
5203
5204 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5205 blocksize = btrfs_level_size(root, level - 1);
5206
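	/*
	 * if the lower block isn't cached yet, create an empty buffer
	 * for it and remember to read ahead the neighbouring pointers
	 * (see reada_walk_down below) before the block itself is read.
	 */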
5207 next = btrfs_find_tree_block(root, bytenr, blocksize);
5208 if (!next) {
5209 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
90d2c51d
MX
5210 if (!next)
5211 return -ENOMEM;
1c4850e2
YZ
5212 reada = 1;
5213 }
5214 btrfs_tree_lock(next);
5215 btrfs_set_lock_blocking(next);
5216
94fcca9f
YZ
5217 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5218 &wc->refs[level - 1],
5219 &wc->flags[level - 1]);
5220 BUG_ON(ret);
5221 BUG_ON(wc->refs[level - 1] == 0);
5222 *lookup_info = 0;
1c4850e2 5223
94fcca9f 5224 if (wc->stage == DROP_REFERENCE) {
1c4850e2 5225 if (wc->refs[level - 1] > 1) {
94fcca9f
YZ
5226 if (level == 1 &&
5227 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5228 goto skip;
5229
1c4850e2
YZ
5230 if (!wc->update_ref ||
5231 generation <= root->root_key.offset)
5232 goto skip;
5233
5234 btrfs_node_key_to_cpu(path->nodes[level], &key,
5235 path->slots[level]);
5236 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5237 if (ret < 0)
5238 goto skip;
5239
5240 wc->stage = UPDATE_BACKREF;
5241 wc->shared_level = level - 1;
5242 }
94fcca9f
YZ
5243 } else {
5244 if (level == 1 &&
5245 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5246 goto skip;
1c4850e2
YZ
5247 }
5248
5249 if (!btrfs_buffer_uptodate(next, generation)) {
5250 btrfs_tree_unlock(next);
5251 free_extent_buffer(next);
5252 next = NULL;
94fcca9f 5253 *lookup_info = 1;
1c4850e2
YZ
5254 }
5255
5256 if (!next) {
5257 if (reada && level == 1)
5258 reada_walk_down(trans, root, wc, path);
5259 next = read_tree_block(root, bytenr, blocksize, generation);
5260 btrfs_tree_lock(next);
5261 btrfs_set_lock_blocking(next);
5262 }
5263
5264 level--;
5265 BUG_ON(level != btrfs_header_level(next));
5266 path->nodes[level] = next;
5267 path->slots[level] = 0;
5268 path->locks[level] = 1;
5269 wc->level = level;
5270 if (wc->level == 1)
5271 wc->reada_slot = 0;
5272 return 0;
5273skip:
5274 wc->refs[level - 1] = 0;
5275 wc->flags[level - 1] = 0;
94fcca9f
YZ
5276 if (wc->stage == DROP_REFERENCE) {
5277 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5278 parent = path->nodes[level]->start;
5279 } else {
5280 BUG_ON(root->root_key.objectid !=
5281 btrfs_header_owner(path->nodes[level]));
5282 parent = 0;
5283 }
1c4850e2 5284
94fcca9f
YZ
5285 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5286 root->root_key.objectid, level - 1, 0);
5287 BUG_ON(ret);
1c4850e2 5288 }
1c4850e2
YZ
5289 btrfs_tree_unlock(next);
5290 free_extent_buffer(next);
94fcca9f 5291 *lookup_info = 1;
1c4850e2
YZ
5292 return 1;
5293}
5294
2c47e605
YZ
5295/*
5296 * helper to process tree block while walking up the tree.
5297 *
5298 * when wc->stage == DROP_REFERENCE, this function drops
5299 * reference count on the block.
5300 *
5301 * when wc->stage == UPDATE_BACKREF, this function changes
5302 * wc->stage back to DROP_REFERENCE if we changed wc->stage
5303 * to UPDATE_BACKREF previously while processing the block.
5304 *
5305 * NOTE: return value 1 means we should stop walking up.
5306 */
5307static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5308 struct btrfs_root *root,
5309 struct btrfs_path *path,
5310 struct walk_control *wc)
5311{
5312 int ret = 0;
5313 int level = wc->level;
5314 struct extent_buffer *eb = path->nodes[level];
5315 u64 parent = 0;
5316
5317 if (wc->stage == UPDATE_BACKREF) {
5318 BUG_ON(wc->shared_level < level);
5319 if (level < wc->shared_level)
5320 goto out;
5321
2c47e605
YZ
5322 ret = find_next_key(path, level + 1, &wc->update_progress);
5323 if (ret > 0)
5324 wc->update_ref = 0;
5325
5326 wc->stage = DROP_REFERENCE;
5327 wc->shared_level = -1;
5328 path->slots[level] = 0;
5329
5330 /*
5331 * check reference count again if the block isn't locked.
5332 * we should start walking down the tree again if reference
5333 * count is one.
5334 */
5335 if (!path->locks[level]) {
5336 BUG_ON(level == 0);
5337 btrfs_tree_lock(eb);
5338 btrfs_set_lock_blocking(eb);
5339 path->locks[level] = 1;
5340
5341 ret = btrfs_lookup_extent_info(trans, root,
5342 eb->start, eb->len,
5343 &wc->refs[level],
5344 &wc->flags[level]);
f82d02d9 5345 BUG_ON(ret);
2c47e605
YZ
5346 BUG_ON(wc->refs[level] == 0);
5347 if (wc->refs[level] == 1) {
5348 btrfs_tree_unlock(eb);
5349 path->locks[level] = 0;
5350 return 1;
5351 }
f82d02d9 5352 }
2c47e605 5353 }
f82d02d9 5354
2c47e605
YZ
5355 /* wc->stage == DROP_REFERENCE */
5356 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5d4f98a2 5357
2c47e605
YZ
5358 if (wc->refs[level] == 1) {
5359 if (level == 0) {
5360 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5361 ret = btrfs_dec_ref(trans, root, eb, 1);
5362 else
5363 ret = btrfs_dec_ref(trans, root, eb, 0);
5364 BUG_ON(ret);
5365 }
5366 /* make block locked assertion in clean_tree_block happy */
5367 if (!path->locks[level] &&
5368 btrfs_header_generation(eb) == trans->transid) {
5369 btrfs_tree_lock(eb);
5370 btrfs_set_lock_blocking(eb);
5371 path->locks[level] = 1;
5372 }
5373 clean_tree_block(trans, root, eb);
5374 }
5375
5376 if (eb == root->node) {
5377 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5378 parent = eb->start;
5379 else
5380 BUG_ON(root->root_key.objectid !=
5381 btrfs_header_owner(eb));
5382 } else {
5383 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5384 parent = path->nodes[level + 1]->start;
5385 else
5386 BUG_ON(root->root_key.objectid !=
5387 btrfs_header_owner(path->nodes[level + 1]));
f82d02d9 5388 }
f82d02d9 5389
2c47e605
YZ
5390 ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
5391 root->root_key.objectid, level, 0);
f82d02d9 5392 BUG_ON(ret);
2c47e605
YZ
5393out:
5394 wc->refs[level] = 0;
5395 wc->flags[level] = 0;
5396 return ret;
5397}
5398
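/*
 * walk down the tree starting at wc->level, processing each block
 * with walk_down_proc()/do_walk_down() until we hit a leaf, run out
 * of slots at the current level, or are told to stop descending.
 */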
5399static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5400 struct btrfs_root *root,
5401 struct btrfs_path *path,
5402 struct walk_control *wc)
5403{
2c47e605 5404 int level = wc->level;
94fcca9f 5405 int lookup_info = 1;
2c47e605
YZ
5406 int ret;
5407
5408 while (level >= 0) {
94fcca9f 5409 ret = walk_down_proc(trans, root, path, wc, lookup_info);
2c47e605
YZ
5410 if (ret > 0)
5411 break;
5412
5413 if (level == 0)
5414 break;
5415
7a7965f8
YZ
5416 if (path->slots[level] >=
5417 btrfs_header_nritems(path->nodes[level]))
5418 break;
5419
94fcca9f 5420 ret = do_walk_down(trans, root, path, wc, &lookup_info);
1c4850e2
YZ
5421 if (ret > 0) {
5422 path->slots[level]++;
5423 continue;
90d2c51d
MX
5424 } else if (ret < 0)
5425 return ret;
1c4850e2 5426 level = wc->level;
f82d02d9 5427 }
f82d02d9
YZ
5428 return 0;
5429}
5430
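/*
 * walk back up the tree one level at a time, handing each finished
 * block to walk_up_proc().  returns 0 when there are more siblings
 * to walk down into, 1 once everything up to max_level is done.
 */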
d397712b 5431static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
98ed5174 5432 struct btrfs_root *root,
f82d02d9 5433 struct btrfs_path *path,
2c47e605 5434 struct walk_control *wc, int max_level)
20524f02 5435{
2c47e605 5436 int level = wc->level;
20524f02 5437 int ret;
9f3a7427 5438
2c47e605
YZ
5439 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5440 while (level < max_level && path->nodes[level]) {
5441 wc->level = level;
5442 if (path->slots[level] + 1 <
5443 btrfs_header_nritems(path->nodes[level])) {
5444 path->slots[level]++;
20524f02
CM
5445 return 0;
5446 } else {
2c47e605
YZ
5447 ret = walk_up_proc(trans, root, path, wc);
5448 if (ret > 0)
5449 return 0;
bd56b302 5450
2c47e605
YZ
5451 if (path->locks[level]) {
5452 btrfs_tree_unlock(path->nodes[level]);
5453 path->locks[level] = 0;
f82d02d9 5454 }
2c47e605
YZ
5455 free_extent_buffer(path->nodes[level]);
5456 path->nodes[level] = NULL;
5457 level++;
20524f02
CM
5458 }
5459 }
5460 return 1;
5461}
5462
9aca1d51 5463/*
2c47e605
YZ
5464 * drop a subvolume tree.
5465 *
5466 * this function traverses the tree freeing any blocks that are only
5467 * referenced by the tree.
5468 *
5469 * when a shared tree block is found, this function decreases its
5470 * reference count by one. if update_ref is true, this function
5471 * also makes sure backrefs for the shared block and all lower level
5472 * blocks are properly updated.
9aca1d51 5473 */
2c47e605 5474int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
20524f02 5475{
5caf2a00 5476 struct btrfs_path *path;
2c47e605
YZ
5477 struct btrfs_trans_handle *trans;
5478 struct btrfs_root *tree_root = root->fs_info->tree_root;
9f3a7427 5479 struct btrfs_root_item *root_item = &root->root_item;
2c47e605
YZ
5480 struct walk_control *wc;
5481 struct btrfs_key key;
5482 int err = 0;
5483 int ret;
5484 int level;
20524f02 5485
5caf2a00
CM
5486 path = btrfs_alloc_path();
5487 BUG_ON(!path);
20524f02 5488
2c47e605
YZ
5489 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5490 BUG_ON(!wc);
5491
5492 trans = btrfs_start_transaction(tree_root, 1);
5493
9f3a7427 5494 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2c47e605 5495 level = btrfs_header_level(root->node);
5d4f98a2
YZ
5496 path->nodes[level] = btrfs_lock_root_node(root);
5497 btrfs_set_lock_blocking(path->nodes[level]);
9f3a7427 5498 path->slots[level] = 0;
5d4f98a2 5499 path->locks[level] = 1;
2c47e605
YZ
5500 memset(&wc->update_progress, 0,
5501 sizeof(wc->update_progress));
9f3a7427 5502 } else {
9f3a7427 5503 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2c47e605
YZ
5504 memcpy(&wc->update_progress, &key,
5505 sizeof(wc->update_progress));
5506
6702ed49 5507 level = root_item->drop_level;
2c47e605 5508 BUG_ON(level == 0);
6702ed49 5509 path->lowest_level = level;
2c47e605
YZ
5510 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5511 path->lowest_level = 0;
5512 if (ret < 0) {
5513 err = ret;
9f3a7427
CM
5514 goto out;
5515 }
1c4850e2 5516 WARN_ON(ret > 0);
2c47e605 5517
7d9eb12c
CM
5518 /*
5519 * unlock our path, this is safe because only this
5520 * function is allowed to delete this snapshot
5521 */
5d4f98a2 5522 btrfs_unlock_up_safe(path, 0);
2c47e605
YZ
5523
5524 level = btrfs_header_level(root->node);
5525 while (1) {
5526 btrfs_tree_lock(path->nodes[level]);
5527 btrfs_set_lock_blocking(path->nodes[level]);
5528
5529 ret = btrfs_lookup_extent_info(trans, root,
5530 path->nodes[level]->start,
5531 path->nodes[level]->len,
5532 &wc->refs[level],
5533 &wc->flags[level]);
5534 BUG_ON(ret);
5535 BUG_ON(wc->refs[level] == 0);
5536
5537 if (level == root_item->drop_level)
5538 break;
5539
5540 btrfs_tree_unlock(path->nodes[level]);
5541 WARN_ON(wc->refs[level] != 1);
5542 level--;
5543 }
9f3a7427 5544 }
2c47e605
YZ
5545
5546 wc->level = level;
5547 wc->shared_level = -1;
5548 wc->stage = DROP_REFERENCE;
5549 wc->update_ref = update_ref;
5550 wc->keep_locks = 0;
1c4850e2 5551 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
2c47e605 5552
d397712b 5553 while (1) {
2c47e605
YZ
5554 ret = walk_down_tree(trans, root, path, wc);
5555 if (ret < 0) {
5556 err = ret;
20524f02 5557 break;
2c47e605 5558 }
9aca1d51 5559
2c47e605
YZ
5560 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5561 if (ret < 0) {
5562 err = ret;
20524f02 5563 break;
2c47e605
YZ
5564 }
5565
5566 if (ret > 0) {
5567 BUG_ON(wc->stage != DROP_REFERENCE);
e7a84565
CM
5568 break;
5569 }
2c47e605
YZ
5570
5571 if (wc->stage == DROP_REFERENCE) {
5572 level = wc->level;
5573 btrfs_node_key(path->nodes[level],
5574 &root_item->drop_progress,
5575 path->slots[level]);
5576 root_item->drop_level = level;
5577 }
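		/*
		 * the key and level recorded above are only consumed by a
		 * later call to this function: the drop_progress branch
		 * near the top re-searches to them, so an interrupted
		 * drop resumes where it left off.
		 */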
5578
5579 BUG_ON(wc->level == 0);
5580 if (trans->transaction->in_commit ||
5581 trans->transaction->delayed_refs.flushing) {
5582 ret = btrfs_update_root(trans, tree_root,
5583 &root->root_key,
5584 root_item);
5585 BUG_ON(ret);
5586
5587 btrfs_end_transaction(trans, tree_root);
5588 trans = btrfs_start_transaction(tree_root, 1);
5589 } else {
5590 unsigned long update;
c3e69d58
CM
5591 update = trans->delayed_ref_updates;
5592 trans->delayed_ref_updates = 0;
5593 if (update)
2c47e605
YZ
5594 btrfs_run_delayed_refs(trans, tree_root,
5595 update);
c3e69d58 5596 }
20524f02 5597 }
2c47e605
YZ
5598 btrfs_release_path(root, path);
5599 BUG_ON(err);
5600
5601 ret = btrfs_del_root(trans, tree_root, &root->root_key);
5602 BUG_ON(ret);
5603
76dda93c
YZ
5604 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5605 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
5606 NULL, NULL);
5607 BUG_ON(ret < 0);
5608 if (ret > 0) {
5609 ret = btrfs_del_orphan_item(trans, tree_root,
5610 root->root_key.objectid);
5611 BUG_ON(ret);
5612 }
5613 }
5614
5615 if (root->in_radix) {
5616 btrfs_free_fs_root(tree_root->fs_info, root);
5617 } else {
5618 free_extent_buffer(root->node);
5619 free_extent_buffer(root->commit_root);
5620 kfree(root);
5621 }
9f3a7427 5622out:
2c47e605
YZ
5623 btrfs_end_transaction(trans, tree_root);
5624 kfree(wc);
5caf2a00 5625 btrfs_free_path(path);
2c47e605 5626 return err;
20524f02 5627}
9078a3e1 5628
2c47e605
YZ
5629/*
5630 * drop subtree rooted at tree block 'node'.
5631 *
5632 * NOTE: this function will unlock and release tree block 'node'
5633 */
f82d02d9
YZ
5634int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5635 struct btrfs_root *root,
5636 struct extent_buffer *node,
5637 struct extent_buffer *parent)
5638{
5639 struct btrfs_path *path;
2c47e605 5640 struct walk_control *wc;
f82d02d9
YZ
5641 int level;
5642 int parent_level;
5643 int ret = 0;
5644 int wret;
5645
2c47e605
YZ
5646 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5647
f82d02d9
YZ
5648 path = btrfs_alloc_path();
5649 BUG_ON(!path);
5650
2c47e605
YZ
5651 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5652 BUG_ON(!wc);
5653
b9447ef8 5654 btrfs_assert_tree_locked(parent);
f82d02d9
YZ
5655 parent_level = btrfs_header_level(parent);
5656 extent_buffer_get(parent);
5657 path->nodes[parent_level] = parent;
5658 path->slots[parent_level] = btrfs_header_nritems(parent);
5659
b9447ef8 5660 btrfs_assert_tree_locked(node);
f82d02d9 5661 level = btrfs_header_level(node);
f82d02d9
YZ
5662 path->nodes[level] = node;
5663 path->slots[level] = 0;
2c47e605
YZ
5664 path->locks[level] = 1;
5665
5666 wc->refs[parent_level] = 1;
5667 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5668 wc->level = level;
5669 wc->shared_level = -1;
5670 wc->stage = DROP_REFERENCE;
5671 wc->update_ref = 0;
5672 wc->keep_locks = 1;
1c4850e2 5673 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
f82d02d9
YZ
5674
5675 while (1) {
2c47e605
YZ
5676 wret = walk_down_tree(trans, root, path, wc);
5677 if (wret < 0) {
f82d02d9 5678 ret = wret;
f82d02d9 5679 break;
2c47e605 5680 }
f82d02d9 5681
2c47e605 5682 wret = walk_up_tree(trans, root, path, wc, parent_level);
f82d02d9
YZ
5683 if (wret < 0)
5684 ret = wret;
5685 if (wret != 0)
5686 break;
5687 }
5688
2c47e605 5689 kfree(wc);
f82d02d9
YZ
5690 btrfs_free_path(path);
5691 return ret;
5692}
5693
5d4f98a2 5694#if 0
8e7bf94f
CM
5695static unsigned long calc_ra(unsigned long start, unsigned long last,
5696 unsigned long nr)
5697{
5698 return min(last, start + nr - 1);
5699}
5700
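/*
 * pull every page in the given range of the reloc inode into the
 * page cache and mark it dirty/delalloc, so the data is written out
 * again by the normal writeback path; this is how the data extent
 * being relocated gets copied to its new location.
 */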
d397712b 5701static noinline int relocate_inode_pages(struct inode *inode, u64 start,
98ed5174 5702 u64 len)
edbd8d4e
CM
5703{
5704 u64 page_start;
5705 u64 page_end;
1a40e23b 5706 unsigned long first_index;
edbd8d4e 5707 unsigned long last_index;
edbd8d4e
CM
5708 unsigned long i;
5709 struct page *page;
d1310b2e 5710 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4313b399 5711 struct file_ra_state *ra;
3eaa2885 5712 struct btrfs_ordered_extent *ordered;
1a40e23b
ZY
5713 unsigned int total_read = 0;
5714 unsigned int total_dirty = 0;
5715 int ret = 0;
4313b399
CM
5716
5717 ra = kzalloc(sizeof(*ra), GFP_NOFS);
edbd8d4e
CM
5718
5719 mutex_lock(&inode->i_mutex);
1a40e23b 5720 first_index = start >> PAGE_CACHE_SHIFT;
edbd8d4e
CM
5721 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5722
1a40e23b
ZY
5723 /* make sure the dirty trick played by the caller works */
5724 ret = invalidate_inode_pages2_range(inode->i_mapping,
5725 first_index, last_index);
5726 if (ret)
5727 goto out_unlock;
8e7bf94f 5728
4313b399 5729 file_ra_state_init(ra, inode->i_mapping);
edbd8d4e 5730
1a40e23b
ZY
5731 for (i = first_index ; i <= last_index; i++) {
5732 if (total_read % ra->ra_pages == 0) {
8e7bf94f 5733 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
1a40e23b 5734 calc_ra(i, last_index, ra->ra_pages));
8e7bf94f
CM
5735 }
5736 total_read++;
3eaa2885
CM
5737again:
5738 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
1a40e23b 5739 BUG_ON(1);
edbd8d4e 5740 page = grab_cache_page(inode->i_mapping, i);
a061fc8d 5741 if (!page) {
1a40e23b 5742 ret = -ENOMEM;
edbd8d4e 5743 goto out_unlock;
a061fc8d 5744 }
edbd8d4e
CM
5745 if (!PageUptodate(page)) {
5746 btrfs_readpage(NULL, page);
5747 lock_page(page);
5748 if (!PageUptodate(page)) {
5749 unlock_page(page);
5750 page_cache_release(page);
1a40e23b 5751 ret = -EIO;
edbd8d4e
CM
5752 goto out_unlock;
5753 }
5754 }
ec44a35c 5755 wait_on_page_writeback(page);
3eaa2885 5756
edbd8d4e
CM
5757 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5758 page_end = page_start + PAGE_CACHE_SIZE - 1;
d1310b2e 5759 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e 5760
3eaa2885
CM
5761 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5762 if (ordered) {
5763 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5764 unlock_page(page);
5765 page_cache_release(page);
5766 btrfs_start_ordered_extent(inode, ordered, 1);
5767 btrfs_put_ordered_extent(ordered);
5768 goto again;
5769 }
5770 set_page_extent_mapped(page);
5771
1a40e23b
ZY
5772 if (i == first_index)
5773 set_extent_bits(io_tree, page_start, page_end,
5774 EXTENT_BOUNDARY, GFP_NOFS);
1f80e4db 5775 btrfs_set_extent_delalloc(inode, page_start, page_end);
1a40e23b 5776
a061fc8d 5777 set_page_dirty(page);
1a40e23b 5778 total_dirty++;
edbd8d4e 5779
d1310b2e 5780 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e
CM
5781 unlock_page(page);
5782 page_cache_release(page);
5783 }
5784
5785out_unlock:
ec44a35c 5786 kfree(ra);
edbd8d4e 5787 mutex_unlock(&inode->i_mutex);
1a40e23b
ZY
5788 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5789 return ret;
edbd8d4e
CM
5790}
5791
d397712b 5792static noinline int relocate_data_extent(struct inode *reloc_inode,
1a40e23b
ZY
5793 struct btrfs_key *extent_key,
5794 u64 offset)
5795{
5796 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5797 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5798 struct extent_map *em;
6643558d
YZ
5799 u64 start = extent_key->objectid - offset;
5800 u64 end = start + extent_key->offset - 1;
bf4ef679 5801
1a40e23b
ZY
5802 em = alloc_extent_map(GFP_NOFS);
5803 BUG_ON(!em || IS_ERR(em));
bf4ef679 5804
6643558d 5805 em->start = start;
1a40e23b 5806 em->len = extent_key->offset;
c8b97818 5807 em->block_len = extent_key->offset;
1a40e23b
ZY
5808 em->block_start = extent_key->objectid;
5809 em->bdev = root->fs_info->fs_devices->latest_bdev;
5810 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5811
5812 /* setup extent map to cheat btrfs_readpage */
6643558d 5813 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
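	/*
	 * add_extent_mapping() returns -EEXIST while older cached
	 * mappings still overlap the range; drop those and retry until
	 * our pinned mapping is in the tree.
	 */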
1a40e23b
ZY
5814 while (1) {
5815 int ret;
890871be 5816 write_lock(&em_tree->lock);
1a40e23b 5817 ret = add_extent_mapping(em_tree, em);
890871be 5818 write_unlock(&em_tree->lock);
1a40e23b
ZY
5819 if (ret != -EEXIST) {
5820 free_extent_map(em);
bf4ef679
CM
5821 break;
5822 }
6643558d 5823 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
bf4ef679 5824 }
6643558d 5825 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
bf4ef679 5826
6643558d 5827 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
1a40e23b 5828}
edbd8d4e 5829
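/*
 * btrfs_ref_path describes one chain of back references from an
 * extent up to a tree root: nodes[] holds the block bytenrs along
 * the path, lowest_level/current_level track where the walk is, and
 * root_objectid/root_generation identify the owning root once it is
 * reached.  __next_ref_path() below fills it in one step at a time.
 */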
1a40e23b
ZY
5830struct btrfs_ref_path {
5831 u64 extent_start;
5832 u64 nodes[BTRFS_MAX_LEVEL];
5833 u64 root_objectid;
5834 u64 root_generation;
5835 u64 owner_objectid;
1a40e23b
ZY
5836 u32 num_refs;
5837 int lowest_level;
5838 int current_level;
f82d02d9
YZ
5839 int shared_level;
5840
5841 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5842 u64 new_nodes[BTRFS_MAX_LEVEL];
1a40e23b 5843};
7d9eb12c 5844
1a40e23b 5845struct disk_extent {
c8b97818 5846 u64 ram_bytes;
1a40e23b
ZY
5847 u64 disk_bytenr;
5848 u64 disk_num_bytes;
5849 u64 offset;
5850 u64 num_bytes;
c8b97818
CM
5851 u8 compression;
5852 u8 encryption;
5853 u16 other_encoding;
1a40e23b 5854};
4313b399 5855
1a40e23b
ZY
5856static int is_cowonly_root(u64 root_objectid)
5857{
5858 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5859 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5860 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5861 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
0403e47e
YZ
5862 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5863 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
1a40e23b
ZY
5864 return 1;
5865 return 0;
5866}
edbd8d4e 5867
d397712b 5868static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
5869 struct btrfs_root *extent_root,
5870 struct btrfs_ref_path *ref_path,
5871 int first_time)
5872{
5873 struct extent_buffer *leaf;
5874 struct btrfs_path *path;
5875 struct btrfs_extent_ref *ref;
5876 struct btrfs_key key;
5877 struct btrfs_key found_key;
5878 u64 bytenr;
5879 u32 nritems;
5880 int level;
5881 int ret = 1;
edbd8d4e 5882
1a40e23b
ZY
5883 path = btrfs_alloc_path();
5884 if (!path)
5885 return -ENOMEM;
bf4ef679 5886
1a40e23b
ZY
5887 if (first_time) {
5888 ref_path->lowest_level = -1;
5889 ref_path->current_level = -1;
f82d02d9 5890 ref_path->shared_level = -1;
1a40e23b
ZY
5891 goto walk_up;
5892 }
5893walk_down:
5894 level = ref_path->current_level - 1;
5895 while (level >= -1) {
5896 u64 parent;
5897 if (level < ref_path->lowest_level)
5898 break;
bf4ef679 5899
d397712b 5900 if (level >= 0)
1a40e23b 5901 bytenr = ref_path->nodes[level];
d397712b 5902 else
1a40e23b 5903 bytenr = ref_path->extent_start;
1a40e23b 5904 BUG_ON(bytenr == 0);
bf4ef679 5905
1a40e23b
ZY
5906 parent = ref_path->nodes[level + 1];
5907 ref_path->nodes[level + 1] = 0;
5908 ref_path->current_level = level;
5909 BUG_ON(parent == 0);
0ef3e66b 5910
1a40e23b
ZY
5911 key.objectid = bytenr;
5912 key.offset = parent + 1;
5913 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 5914
1a40e23b
ZY
5915 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5916 if (ret < 0)
edbd8d4e 5917 goto out;
1a40e23b 5918 BUG_ON(ret == 0);
7d9eb12c 5919
1a40e23b
ZY
5920 leaf = path->nodes[0];
5921 nritems = btrfs_header_nritems(leaf);
5922 if (path->slots[0] >= nritems) {
5923 ret = btrfs_next_leaf(extent_root, path);
5924 if (ret < 0)
5925 goto out;
5926 if (ret > 0)
5927 goto next;
5928 leaf = path->nodes[0];
5929 }
0ef3e66b 5930
1a40e23b
ZY
5931 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5932 if (found_key.objectid == bytenr &&
f82d02d9
YZ
5933 found_key.type == BTRFS_EXTENT_REF_KEY) {
5934 if (level < ref_path->shared_level)
5935 ref_path->shared_level = level;
1a40e23b 5936 goto found;
f82d02d9 5937 }
1a40e23b
ZY
5938next:
5939 level--;
5940 btrfs_release_path(extent_root, path);
d899e052 5941 cond_resched();
1a40e23b
ZY
5942 }
5943 /* reached lowest level */
5944 ret = 1;
5945 goto out;
5946walk_up:
5947 level = ref_path->current_level;
5948 while (level < BTRFS_MAX_LEVEL - 1) {
5949 u64 ref_objectid;
d397712b
CM
5950
5951 if (level >= 0)
1a40e23b 5952 bytenr = ref_path->nodes[level];
d397712b 5953 else
1a40e23b 5954 bytenr = ref_path->extent_start;
d397712b 5955
1a40e23b 5956 BUG_ON(bytenr == 0);
edbd8d4e 5957
1a40e23b
ZY
5958 key.objectid = bytenr;
5959 key.offset = 0;
5960 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 5961
1a40e23b
ZY
5962 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5963 if (ret < 0)
5964 goto out;
edbd8d4e 5965
1a40e23b
ZY
5966 leaf = path->nodes[0];
5967 nritems = btrfs_header_nritems(leaf);
5968 if (path->slots[0] >= nritems) {
5969 ret = btrfs_next_leaf(extent_root, path);
5970 if (ret < 0)
5971 goto out;
5972 if (ret > 0) {
5973 /* the extent was freed by someone */
5974 if (ref_path->lowest_level == level)
5975 goto out;
5976 btrfs_release_path(extent_root, path);
5977 goto walk_down;
5978 }
5979 leaf = path->nodes[0];
5980 }
edbd8d4e 5981
1a40e23b
ZY
5982 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5983 if (found_key.objectid != bytenr ||
5984 found_key.type != BTRFS_EXTENT_REF_KEY) {
5985 /* the extent was freed by someone */
5986 if (ref_path->lowest_level == level) {
5987 ret = 1;
5988 goto out;
5989 }
5990 btrfs_release_path(extent_root, path);
5991 goto walk_down;
5992 }
5993found:
5994 ref = btrfs_item_ptr(leaf, path->slots[0],
5995 struct btrfs_extent_ref);
5996 ref_objectid = btrfs_ref_objectid(leaf, ref);
5997 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5998 if (first_time) {
5999 level = (int)ref_objectid;
6000 BUG_ON(level >= BTRFS_MAX_LEVEL);
6001 ref_path->lowest_level = level;
6002 ref_path->current_level = level;
6003 ref_path->nodes[level] = bytenr;
6004 } else {
6005 WARN_ON(ref_objectid != level);
6006 }
6007 } else {
6008 WARN_ON(level != -1);
6009 }
6010 first_time = 0;
bf4ef679 6011
1a40e23b
ZY
6012 if (ref_path->lowest_level == level) {
6013 ref_path->owner_objectid = ref_objectid;
1a40e23b
ZY
6014 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6015 }
bf4ef679 6016
7d9eb12c 6017 /*
1a40e23b
ZY
6018 * the block is a tree root or the block isn't in a reference
6019 * counted tree.
7d9eb12c 6020 */
1a40e23b
ZY
6021 if (found_key.objectid == found_key.offset ||
6022 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6023 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6024 ref_path->root_generation =
6025 btrfs_ref_generation(leaf, ref);
6026 if (level < 0) {
6027 /* special reference from the tree log */
6028 ref_path->nodes[0] = found_key.offset;
6029 ref_path->current_level = 0;
6030 }
6031 ret = 0;
6032 goto out;
6033 }
7d9eb12c 6034
1a40e23b
ZY
6035 level++;
6036 BUG_ON(ref_path->nodes[level] != 0);
6037 ref_path->nodes[level] = found_key.offset;
6038 ref_path->current_level = level;
bf4ef679 6039
1a40e23b
ZY
6040 /*
6041 * the reference was created in the running transaction,
6042 * no need to continue walking up.
6043 */
6044 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6045 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6046 ref_path->root_generation =
6047 btrfs_ref_generation(leaf, ref);
6048 ret = 0;
6049 goto out;
7d9eb12c
CM
6050 }
6051
1a40e23b 6052 btrfs_release_path(extent_root, path);
d899e052 6053 cond_resched();
7d9eb12c 6054 }
1a40e23b
ZY
6055 /* reached max tree level, but no tree root found. */
6056 BUG();
edbd8d4e 6057out:
1a40e23b
ZY
6058 btrfs_free_path(path);
6059 return ret;
edbd8d4e
CM
6060}
6061
1a40e23b
ZY
6062static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6063 struct btrfs_root *extent_root,
6064 struct btrfs_ref_path *ref_path,
6065 u64 extent_start)
a061fc8d 6066{
1a40e23b
ZY
6067 memset(ref_path, 0, sizeof(*ref_path));
6068 ref_path->extent_start = extent_start;
a061fc8d 6069
1a40e23b 6070 return __next_ref_path(trans, extent_root, ref_path, 1);
a061fc8d
CM
6071}
6072
1a40e23b
ZY
6073static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6074 struct btrfs_root *extent_root,
6075 struct btrfs_ref_path *ref_path)
edbd8d4e 6076{
1a40e23b
ZY
6077 return __next_ref_path(trans, extent_root, ref_path, 0);
6078}
6079
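/*
 * look up the file extents of the reloc inode covering the range
 * described by extent_key and return them as an array of disk_extent,
 * i.e. the new on-disk locations the relocated data was copied to.
 */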
d397712b 6080static noinline int get_new_locations(struct inode *reloc_inode,
1a40e23b
ZY
6081 struct btrfs_key *extent_key,
6082 u64 offset, int no_fragment,
6083 struct disk_extent **extents,
6084 int *nr_extents)
6085{
6086 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6087 struct btrfs_path *path;
6088 struct btrfs_file_extent_item *fi;
edbd8d4e 6089 struct extent_buffer *leaf;
1a40e23b
ZY
6090 struct disk_extent *exts = *extents;
6091 struct btrfs_key found_key;
6092 u64 cur_pos;
6093 u64 last_byte;
edbd8d4e 6094 u32 nritems;
1a40e23b
ZY
6095 int nr = 0;
6096 int max = *nr_extents;
6097 int ret;
edbd8d4e 6098
1a40e23b
ZY
6099 WARN_ON(!no_fragment && *extents);
6100 if (!exts) {
6101 max = 1;
6102 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6103 if (!exts)
6104 return -ENOMEM;
a061fc8d 6105 }
edbd8d4e 6106
1a40e23b
ZY
6107 path = btrfs_alloc_path();
6108 BUG_ON(!path);
edbd8d4e 6109
1a40e23b
ZY
6110 cur_pos = extent_key->objectid - offset;
6111 last_byte = extent_key->objectid + extent_key->offset;
6112 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6113 cur_pos, 0);
6114 if (ret < 0)
6115 goto out;
6116 if (ret > 0) {
6117 ret = -ENOENT;
6118 goto out;
6119 }
edbd8d4e 6120
1a40e23b 6121 while (1) {
edbd8d4e
CM
6122 leaf = path->nodes[0];
6123 nritems = btrfs_header_nritems(leaf);
1a40e23b
ZY
6124 if (path->slots[0] >= nritems) {
6125 ret = btrfs_next_leaf(root, path);
a061fc8d
CM
6126 if (ret < 0)
6127 goto out;
1a40e23b
ZY
6128 if (ret > 0)
6129 break;
bf4ef679 6130 leaf = path->nodes[0];
a061fc8d 6131 }
edbd8d4e
CM
6132
6133 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1a40e23b
ZY
6134 if (found_key.offset != cur_pos ||
6135 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6136 found_key.objectid != reloc_inode->i_ino)
edbd8d4e
CM
6137 break;
6138
1a40e23b
ZY
6139 fi = btrfs_item_ptr(leaf, path->slots[0],
6140 struct btrfs_file_extent_item);
6141 if (btrfs_file_extent_type(leaf, fi) !=
6142 BTRFS_FILE_EXTENT_REG ||
6143 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
edbd8d4e 6144 break;
1a40e23b
ZY
6145
6146 if (nr == max) {
6147 struct disk_extent *old = exts;
6148 max *= 2;
6149 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6150 memcpy(exts, old, sizeof(*exts) * nr);
6151 if (old != *extents)
6152 kfree(old);
a061fc8d 6153 }
edbd8d4e 6154
1a40e23b
ZY
6155 exts[nr].disk_bytenr =
6156 btrfs_file_extent_disk_bytenr(leaf, fi);
6157 exts[nr].disk_num_bytes =
6158 btrfs_file_extent_disk_num_bytes(leaf, fi);
6159 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6160 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
c8b97818
CM
6161 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6162 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6163 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6164 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6165 fi);
d899e052
YZ
6166 BUG_ON(exts[nr].offset > 0);
6167 BUG_ON(exts[nr].compression || exts[nr].encryption);
6168 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
edbd8d4e 6169
1a40e23b
ZY
6170 cur_pos += exts[nr].num_bytes;
6171 nr++;
6172
6173 if (cur_pos + offset >= last_byte)
6174 break;
6175
6176 if (no_fragment) {
6177 ret = 1;
edbd8d4e 6178 goto out;
1a40e23b
ZY
6179 }
6180 path->slots[0]++;
6181 }
6182
1f80e4db 6183 BUG_ON(cur_pos + offset > last_byte);
1a40e23b
ZY
6184 if (cur_pos + offset < last_byte) {
6185 ret = -ENOENT;
6186 goto out;
edbd8d4e
CM
6187 }
6188 ret = 0;
6189out:
1a40e23b
ZY
6190 btrfs_free_path(path);
6191 if (ret) {
6192 if (exts != *extents)
6193 kfree(exts);
6194 } else {
6195 *extents = exts;
6196 *nr_extents = nr;
6197 }
6198 return ret;
6199}
6200
d397712b 6201static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6202 struct btrfs_root *root,
6203 struct btrfs_path *path,
6204 struct btrfs_key *extent_key,
6205 struct btrfs_key *leaf_key,
6206 struct btrfs_ref_path *ref_path,
6207 struct disk_extent *new_extents,
6208 int nr_extents)
6209{
6210 struct extent_buffer *leaf;
6211 struct btrfs_file_extent_item *fi;
6212 struct inode *inode = NULL;
6213 struct btrfs_key key;
6214 u64 lock_start = 0;
6215 u64 lock_end = 0;
6216 u64 num_bytes;
6217 u64 ext_offset;
86288a19 6218 u64 search_end = (u64)-1;
1a40e23b 6219 u32 nritems;
3bb1a1bc 6220 int nr_scaned = 0;
1a40e23b 6221 int extent_locked = 0;
d899e052 6222 int extent_type;
1a40e23b
ZY
6223 int ret;
6224
3bb1a1bc 6225 memcpy(&key, leaf_key, sizeof(key));
1a40e23b 6226 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3bb1a1bc
YZ
6227 if (key.objectid < ref_path->owner_objectid ||
6228 (key.objectid == ref_path->owner_objectid &&
6229 key.type < BTRFS_EXTENT_DATA_KEY)) {
6230 key.objectid = ref_path->owner_objectid;
6231 key.type = BTRFS_EXTENT_DATA_KEY;
6232 key.offset = 0;
6233 }
1a40e23b
ZY
6234 }
6235
6236 while (1) {
6237 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6238 if (ret < 0)
6239 goto out;
6240
6241 leaf = path->nodes[0];
6242 nritems = btrfs_header_nritems(leaf);
6243next:
6244 if (extent_locked && ret > 0) {
6245 /*
6246 * the file extent item was modified by someone
6247 * before the extent got locked.
6248 */
1a40e23b
ZY
6249 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6250 lock_end, GFP_NOFS);
6251 extent_locked = 0;
6252 }
6253
6254 if (path->slots[0] >= nritems) {
3bb1a1bc 6255 if (++nr_scaned > 2)
1a40e23b
ZY
6256 break;
6257
6258 BUG_ON(extent_locked);
6259 ret = btrfs_next_leaf(root, path);
6260 if (ret < 0)
6261 goto out;
6262 if (ret > 0)
6263 break;
6264 leaf = path->nodes[0];
6265 nritems = btrfs_header_nritems(leaf);
6266 }
6267
6268 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6269
6270 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6271 if ((key.objectid > ref_path->owner_objectid) ||
6272 (key.objectid == ref_path->owner_objectid &&
6273 key.type > BTRFS_EXTENT_DATA_KEY) ||
86288a19 6274 key.offset >= search_end)
1a40e23b
ZY
6275 break;
6276 }
6277
6278 if (inode && key.objectid != inode->i_ino) {
6279 BUG_ON(extent_locked);
6280 btrfs_release_path(root, path);
6281 mutex_unlock(&inode->i_mutex);
6282 iput(inode);
6283 inode = NULL;
6284 continue;
6285 }
6286
6287 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6288 path->slots[0]++;
6289 ret = 1;
6290 goto next;
6291 }
6292 fi = btrfs_item_ptr(leaf, path->slots[0],
6293 struct btrfs_file_extent_item);
d899e052
YZ
6294 extent_type = btrfs_file_extent_type(leaf, fi);
6295 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6296 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
1a40e23b
ZY
6297 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6298 extent_key->objectid)) {
6299 path->slots[0]++;
6300 ret = 1;
6301 goto next;
6302 }
6303
6304 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6305 ext_offset = btrfs_file_extent_offset(leaf, fi);
6306
86288a19
YZ
6307 if (search_end == (u64)-1) {
6308 search_end = key.offset - ext_offset +
6309 btrfs_file_extent_ram_bytes(leaf, fi);
6310 }
1a40e23b
ZY
6311
6312 if (!extent_locked) {
6313 lock_start = key.offset;
6314 lock_end = lock_start + num_bytes - 1;
6315 } else {
6643558d
YZ
6316 if (lock_start > key.offset ||
6317 lock_end + 1 < key.offset + num_bytes) {
6318 unlock_extent(&BTRFS_I(inode)->io_tree,
6319 lock_start, lock_end, GFP_NOFS);
6320 extent_locked = 0;
6321 }
1a40e23b
ZY
6322 }
6323
6324 if (!inode) {
6325 btrfs_release_path(root, path);
6326
6327 inode = btrfs_iget_locked(root->fs_info->sb,
6328 key.objectid, root);
6329 if (inode->i_state & I_NEW) {
6330 BTRFS_I(inode)->root = root;
6331 BTRFS_I(inode)->location.objectid =
6332 key.objectid;
6333 BTRFS_I(inode)->location.type =
6334 BTRFS_INODE_ITEM_KEY;
6335 BTRFS_I(inode)->location.offset = 0;
6336 btrfs_read_locked_inode(inode);
6337 unlock_new_inode(inode);
6338 }
6339 /*
6340 * some code calls btrfs_commit_transaction while
6341 * holding the i_mutex, so we can't use mutex_lock
6342 * here.
6343 */
6344 if (is_bad_inode(inode) ||
6345 !mutex_trylock(&inode->i_mutex)) {
6346 iput(inode);
6347 inode = NULL;
6348 key.offset = (u64)-1;
6349 goto skip;
6350 }
6351 }
6352
6353 if (!extent_locked) {
6354 struct btrfs_ordered_extent *ordered;
6355
6356 btrfs_release_path(root, path);
6357
6358 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6359 lock_end, GFP_NOFS);
6360 ordered = btrfs_lookup_first_ordered_extent(inode,
6361 lock_end);
6362 if (ordered &&
6363 ordered->file_offset <= lock_end &&
6364 ordered->file_offset + ordered->len > lock_start) {
6365 unlock_extent(&BTRFS_I(inode)->io_tree,
6366 lock_start, lock_end, GFP_NOFS);
6367 btrfs_start_ordered_extent(inode, ordered, 1);
6368 btrfs_put_ordered_extent(ordered);
6369 key.offset += num_bytes;
6370 goto skip;
6371 }
6372 if (ordered)
6373 btrfs_put_ordered_extent(ordered);
6374
1a40e23b
ZY
6375 extent_locked = 1;
6376 continue;
6377 }
6378
6379 if (nr_extents == 1) {
6380 /* update extent pointer in place */
1a40e23b
ZY
6381 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6382 new_extents[0].disk_bytenr);
6383 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6384 new_extents[0].disk_num_bytes);
1a40e23b
ZY
6385 btrfs_mark_buffer_dirty(leaf);
6386
6387 btrfs_drop_extent_cache(inode, key.offset,
6388 key.offset + num_bytes - 1, 0);
6389
6390 ret = btrfs_inc_extent_ref(trans, root,
6391 new_extents[0].disk_bytenr,
6392 new_extents[0].disk_num_bytes,
6393 leaf->start,
6394 root->root_key.objectid,
6395 trans->transid,
3bb1a1bc 6396 key.objectid);
1a40e23b
ZY
6397 BUG_ON(ret);
6398
6399 ret = btrfs_free_extent(trans, root,
6400 extent_key->objectid,
6401 extent_key->offset,
6402 leaf->start,
6403 btrfs_header_owner(leaf),
6404 btrfs_header_generation(leaf),
3bb1a1bc 6405 key.objectid, 0);
1a40e23b
ZY
6406 BUG_ON(ret);
6407
6408 btrfs_release_path(root, path);
6409 key.offset += num_bytes;
6410 } else {
d899e052
YZ
6411 BUG_ON(1);
6412#if 0
1a40e23b
ZY
6413 u64 alloc_hint;
6414 u64 extent_len;
6415 int i;
6416 /*
6417 * drop the old extent pointer first, then insert the
6418 * new pointers one by one
6419 */
6420 btrfs_release_path(root, path);
6421 ret = btrfs_drop_extents(trans, root, inode, key.offset,
6422 key.offset + num_bytes,
6423 key.offset, &alloc_hint);
6424 BUG_ON(ret);
6425
6426 for (i = 0; i < nr_extents; i++) {
6427 if (ext_offset >= new_extents[i].num_bytes) {
6428 ext_offset -= new_extents[i].num_bytes;
6429 continue;
6430 }
6431 extent_len = min(new_extents[i].num_bytes -
6432 ext_offset, num_bytes);
6433
6434 ret = btrfs_insert_empty_item(trans, root,
6435 path, &key,
6436 sizeof(*fi));
6437 BUG_ON(ret);
6438
6439 leaf = path->nodes[0];
6440 fi = btrfs_item_ptr(leaf, path->slots[0],
6441 struct btrfs_file_extent_item);
6442 btrfs_set_file_extent_generation(leaf, fi,
6443 trans->transid);
6444 btrfs_set_file_extent_type(leaf, fi,
6445 BTRFS_FILE_EXTENT_REG);
6446 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6447 new_extents[i].disk_bytenr);
6448 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6449 new_extents[i].disk_num_bytes);
c8b97818
CM
6450 btrfs_set_file_extent_ram_bytes(leaf, fi,
6451 new_extents[i].ram_bytes);
6452
6453 btrfs_set_file_extent_compression(leaf, fi,
6454 new_extents[i].compression);
6455 btrfs_set_file_extent_encryption(leaf, fi,
6456 new_extents[i].encryption);
6457 btrfs_set_file_extent_other_encoding(leaf, fi,
6458 new_extents[i].other_encoding);
6459
1a40e23b
ZY
6460 btrfs_set_file_extent_num_bytes(leaf, fi,
6461 extent_len);
6462 ext_offset += new_extents[i].offset;
6463 btrfs_set_file_extent_offset(leaf, fi,
6464 ext_offset);
6465 btrfs_mark_buffer_dirty(leaf);
6466
6467 btrfs_drop_extent_cache(inode, key.offset,
6468 key.offset + extent_len - 1, 0);
6469
6470 ret = btrfs_inc_extent_ref(trans, root,
6471 new_extents[i].disk_bytenr,
6472 new_extents[i].disk_num_bytes,
6473 leaf->start,
6474 root->root_key.objectid,
3bb1a1bc 6475 trans->transid, key.objectid);
1a40e23b
ZY
6476 BUG_ON(ret);
6477 btrfs_release_path(root, path);
6478
a76a3cd4 6479 inode_add_bytes(inode, extent_len);
1a40e23b
ZY
6480
6481 ext_offset = 0;
6482 num_bytes -= extent_len;
6483 key.offset += extent_len;
6484
6485 if (num_bytes == 0)
6486 break;
6487 }
6488 BUG_ON(i >= nr_extents);
d899e052 6489#endif
1a40e23b
ZY
6490 }
6491
6492 if (extent_locked) {
1a40e23b
ZY
6493 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6494 lock_end, GFP_NOFS);
6495 extent_locked = 0;
6496 }
6497skip:
6498 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
86288a19 6499 key.offset >= search_end)
1a40e23b
ZY
6500 break;
6501
6502 cond_resched();
6503 }
6504 ret = 0;
6505out:
6506 btrfs_release_path(root, path);
6507 if (inode) {
6508 mutex_unlock(&inode->i_mutex);
6509 if (extent_locked) {
1a40e23b
ZY
6510 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6511 lock_end, GFP_NOFS);
6512 }
6513 iput(inode);
6514 }
6515 return ret;
6516}
6517
1a40e23b
ZY
6518int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6519 struct btrfs_root *root,
6520 struct extent_buffer *buf, u64 orig_start)
6521{
6522 int level;
6523 int ret;
6524
6525 BUG_ON(btrfs_header_generation(buf) != trans->transid);
6526 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6527
6528 level = btrfs_header_level(buf);
6529 if (level == 0) {
6530 struct btrfs_leaf_ref *ref;
6531 struct btrfs_leaf_ref *orig_ref;
6532
6533 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6534 if (!orig_ref)
6535 return -ENOENT;
6536
6537 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6538 if (!ref) {
6539 btrfs_free_leaf_ref(root, orig_ref);
6540 return -ENOMEM;
6541 }
6542
6543 ref->nritems = orig_ref->nritems;
6544 memcpy(ref->extents, orig_ref->extents,
6545 sizeof(ref->extents[0]) * ref->nritems);
6546
6547 btrfs_free_leaf_ref(root, orig_ref);
6548
6549 ref->root_gen = trans->transid;
6550 ref->bytenr = buf->start;
6551 ref->owner = btrfs_header_owner(buf);
6552 ref->generation = btrfs_header_generation(buf);
bd56b302 6553
1a40e23b
ZY
6554 ret = btrfs_add_leaf_ref(root, ref, 0);
6555 WARN_ON(ret);
6556 btrfs_free_leaf_ref(root, ref);
6557 }
6558 return 0;
6559}
6560
d397712b 6561static noinline int invalidate_extent_cache(struct btrfs_root *root,
1a40e23b
ZY
6562 struct extent_buffer *leaf,
6563 struct btrfs_block_group_cache *group,
6564 struct btrfs_root *target_root)
6565{
6566 struct btrfs_key key;
6567 struct inode *inode = NULL;
6568 struct btrfs_file_extent_item *fi;
2ac55d41 6569 struct extent_state *cached_state = NULL;
1a40e23b
ZY
6570 u64 num_bytes;
6571 u64 skip_objectid = 0;
6572 u32 nritems;
6573 u32 i;
6574
6575 nritems = btrfs_header_nritems(leaf);
6576 for (i = 0; i < nritems; i++) {
6577 btrfs_item_key_to_cpu(leaf, &key, i);
6578 if (key.objectid == skip_objectid ||
6579 key.type != BTRFS_EXTENT_DATA_KEY)
6580 continue;
6581 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6582 if (btrfs_file_extent_type(leaf, fi) ==
6583 BTRFS_FILE_EXTENT_INLINE)
6584 continue;
6585 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6586 continue;
6587 if (!inode || inode->i_ino != key.objectid) {
6588 iput(inode);
6589 inode = btrfs_ilookup(target_root->fs_info->sb,
6590 key.objectid, target_root, 1);
6591 }
6592 if (!inode) {
6593 skip_objectid = key.objectid;
6594 continue;
6595 }
6596 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6597
2ac55d41
JB
6598 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
6599 key.offset + num_bytes - 1, 0, &cached_state,
6600 GFP_NOFS);
1a40e23b
ZY
6601 btrfs_drop_extent_cache(inode, key.offset,
6602 key.offset + num_bytes - 1, 1);
2ac55d41
JB
6603 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
6604 key.offset + num_bytes - 1, &cached_state,
6605 GFP_NOFS);
1a40e23b
ZY
6606 cond_resched();
6607 }
6608 iput(inode);
6609 return 0;
6610}
6611
d397712b 6612static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6613 struct btrfs_root *root,
6614 struct extent_buffer *leaf,
6615 struct btrfs_block_group_cache *group,
6616 struct inode *reloc_inode)
6617{
6618 struct btrfs_key key;
6619 struct btrfs_key extent_key;
6620 struct btrfs_file_extent_item *fi;
6621 struct btrfs_leaf_ref *ref;
6622 struct disk_extent *new_extent;
6623 u64 bytenr;
6624 u64 num_bytes;
6625 u32 nritems;
6626 u32 i;
6627 int ext_index;
6628 int nr_extent;
6629 int ret;
6630
6631 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6632 BUG_ON(!new_extent);
6633
6634 ref = btrfs_lookup_leaf_ref(root, leaf->start);
6635 BUG_ON(!ref);
6636
6637 ext_index = -1;
6638 nritems = btrfs_header_nritems(leaf);
6639 for (i = 0; i < nritems; i++) {
6640 btrfs_item_key_to_cpu(leaf, &key, i);
6641 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6642 continue;
6643 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6644 if (btrfs_file_extent_type(leaf, fi) ==
6645 BTRFS_FILE_EXTENT_INLINE)
6646 continue;
6647 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6648 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6649 if (bytenr == 0)
6650 continue;
6651
6652 ext_index++;
6653 if (bytenr >= group->key.objectid + group->key.offset ||
6654 bytenr + num_bytes <= group->key.objectid)
6655 continue;
6656
6657 extent_key.objectid = bytenr;
6658 extent_key.offset = num_bytes;
6659 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6660 nr_extent = 1;
6661 ret = get_new_locations(reloc_inode, &extent_key,
6662 group->key.objectid, 1,
6663 &new_extent, &nr_extent);
6664 if (ret > 0)
6665 continue;
6666 BUG_ON(ret < 0);
6667
6668 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6669 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6670 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6671 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6672
1a40e23b
ZY
6673 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6674 new_extent->disk_bytenr);
6675 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6676 new_extent->disk_num_bytes);
1a40e23b
ZY
6677 btrfs_mark_buffer_dirty(leaf);
6678
6679 ret = btrfs_inc_extent_ref(trans, root,
6680 new_extent->disk_bytenr,
6681 new_extent->disk_num_bytes,
6682 leaf->start,
6683 root->root_key.objectid,
3bb1a1bc 6684 trans->transid, key.objectid);
1a40e23b 6685 BUG_ON(ret);
56bec294 6686
1a40e23b
ZY
6687 ret = btrfs_free_extent(trans, root,
6688 bytenr, num_bytes, leaf->start,
6689 btrfs_header_owner(leaf),
6690 btrfs_header_generation(leaf),
3bb1a1bc 6691 key.objectid, 0);
1a40e23b
ZY
6692 BUG_ON(ret);
6693 cond_resched();
6694 }
6695 kfree(new_extent);
6696 BUG_ON(ext_index + 1 != ref->nritems);
6697 btrfs_free_leaf_ref(root, ref);
6698 return 0;
6699}
6700
f82d02d9
YZ
6701int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6702 struct btrfs_root *root)
1a40e23b
ZY
6703{
6704 struct btrfs_root *reloc_root;
f82d02d9 6705 int ret;
1a40e23b
ZY
6706
6707 if (root->reloc_root) {
6708 reloc_root = root->reloc_root;
6709 root->reloc_root = NULL;
6710 list_add(&reloc_root->dead_list,
6711 &root->fs_info->dead_reloc_roots);
f82d02d9
YZ
6712
6713 btrfs_set_root_bytenr(&reloc_root->root_item,
6714 reloc_root->node->start);
6715 btrfs_set_root_level(&root->root_item,
6716 btrfs_header_level(reloc_root->node));
6717 memset(&reloc_root->root_item.drop_progress, 0,
6718 sizeof(struct btrfs_disk_key));
6719 reloc_root->root_item.drop_level = 0;
6720
6721 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6722 &reloc_root->root_key,
6723 &reloc_root->root_item);
6724 BUG_ON(ret);
1a40e23b
ZY
6725 }
6726 return 0;
6727}
6728
6729int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6730{
6731 struct btrfs_trans_handle *trans;
6732 struct btrfs_root *reloc_root;
6733 struct btrfs_root *prev_root = NULL;
6734 struct list_head dead_roots;
6735 int ret;
6736 unsigned long nr;
6737
6738 INIT_LIST_HEAD(&dead_roots);
6739 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6740
6741 while (!list_empty(&dead_roots)) {
6742 reloc_root = list_entry(dead_roots.prev,
6743 struct btrfs_root, dead_list);
6744 list_del_init(&reloc_root->dead_list);
6745
6746 BUG_ON(reloc_root->commit_root != NULL);
6747 while (1) {
6748 trans = btrfs_join_transaction(root, 1);
6749 BUG_ON(!trans);
6750
6751 mutex_lock(&root->fs_info->drop_mutex);
6752 ret = btrfs_drop_snapshot(trans, reloc_root);
6753 if (ret != -EAGAIN)
6754 break;
6755 mutex_unlock(&root->fs_info->drop_mutex);
6756
6757 nr = trans->blocks_used;
6758 ret = btrfs_end_transaction(trans, root);
6759 BUG_ON(ret);
6760 btrfs_btree_balance_dirty(root, nr);
6761 }
6762
6763 free_extent_buffer(reloc_root->node);
6764
6765 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6766 &reloc_root->root_key);
6767 BUG_ON(ret);
6768 mutex_unlock(&root->fs_info->drop_mutex);
6769
6770 nr = trans->blocks_used;
6771 ret = btrfs_end_transaction(trans, root);
6772 BUG_ON(ret);
6773 btrfs_btree_balance_dirty(root, nr);
6774
6775 kfree(prev_root);
6776 prev_root = reloc_root;
6777 }
6778 if (prev_root) {
6779 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6780 kfree(prev_root);
6781 }
6782 return 0;
6783}
6784
6785int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6786{
6787 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6788 return 0;
6789}
6790
6791int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6792{
6793 struct btrfs_root *reloc_root;
6794 struct btrfs_trans_handle *trans;
6795 struct btrfs_key location;
6796 int found;
6797 int ret;
6798
6799 mutex_lock(&root->fs_info->tree_reloc_mutex);
6800 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6801 BUG_ON(ret);
6802 found = !list_empty(&root->fs_info->dead_reloc_roots);
6803 mutex_unlock(&root->fs_info->tree_reloc_mutex);
6804
6805 if (found) {
6806 trans = btrfs_start_transaction(root, 1);
6807 BUG_ON(!trans);
6808 ret = btrfs_commit_transaction(trans, root);
6809 BUG_ON(ret);
6810 }
6811
6812 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6813 location.offset = (u64)-1;
6814 location.type = BTRFS_ROOT_ITEM_KEY;
6815
6816 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6817 BUG_ON(!reloc_root);
6818 btrfs_orphan_cleanup(reloc_root);
6819 return 0;
6820}
6821
d397712b 6822static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6823 struct btrfs_root *root)
6824{
6825 struct btrfs_root *reloc_root;
6826 struct extent_buffer *eb;
6827 struct btrfs_root_item *root_item;
6828 struct btrfs_key root_key;
6829 int ret;
6830
6831 BUG_ON(!root->ref_cows);
6832 if (root->reloc_root)
6833 return 0;
6834
6835 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6836 BUG_ON(!root_item);
6837
6838 ret = btrfs_copy_root(trans, root, root->commit_root,
6839 &eb, BTRFS_TREE_RELOC_OBJECTID);
6840 BUG_ON(ret);
6841
6842 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6843 root_key.offset = root->root_key.objectid;
6844 root_key.type = BTRFS_ROOT_ITEM_KEY;
6845
6846 memcpy(root_item, &root->root_item, sizeof(root_item));
6847 btrfs_set_root_refs(root_item, 0);
6848 btrfs_set_root_bytenr(root_item, eb->start);
6849 btrfs_set_root_level(root_item, btrfs_header_level(eb));
84234f3a 6850 btrfs_set_root_generation(root_item, trans->transid);
1a40e23b
ZY
6851
6852 btrfs_tree_unlock(eb);
6853 free_extent_buffer(eb);
6854
6855 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6856 &root_key, root_item);
6857 BUG_ON(ret);
6858 kfree(root_item);
6859
6860 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6861 &root_key);
6862 BUG_ON(!reloc_root);
6863 reloc_root->last_trans = trans->transid;
6864 reloc_root->commit_root = NULL;
6865 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6866
6867 root->reloc_root = reloc_root;
6868 return 0;
6869}
6870
6871/*
6872 * Core function of space balance.
6873 *
6874 * The idea is to use reloc trees to relocate tree blocks in reference
f82d02d9
YZ
6875 * counted roots. There is one reloc tree for each subvol, and all
6876 * reloc trees share the same root key objectid. Reloc trees are snapshots
6877 * of the latest committed roots of subvols (root->commit_root).
6878 *
6879 * To relocate a tree block referenced by a subvol, there are two steps.
6880 * COW the block through the subvol's reloc tree, then update the block
6881 * pointer in the subvol to point to the new block. Since all reloc trees
6882 * share the same root key objectid, doing special handling for tree blocks
6883 * owned by them is easy. Once a tree block has been COWed in one reloc
6884 * tree, we can use the resulting new block directly when the same block
6885 * is required to COW again through other reloc trees. In this way,
6886 * relocated tree blocks are shared between reloc trees, so they are also
6887 * shared between subvols.
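 *
 * In relocate_one_path() below the two steps look roughly like this
 * (sketch only; the real code also tracks shared levels and reuses
 * already-COWed nodes):
 *
 *	init_reloc_tree(trans, root);
 *	btrfs_search_slot(trans, root->reloc_root, first_key, path, 0, 1);
 *	btrfs_merge_path(trans, root, keys, nodes, lowest_level);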
1a40e23b 6888 */
d397712b 6889static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6890 struct btrfs_root *root,
6891 struct btrfs_path *path,
6892 struct btrfs_key *first_key,
6893 struct btrfs_ref_path *ref_path,
6894 struct btrfs_block_group_cache *group,
6895 struct inode *reloc_inode)
6896{
6897 struct btrfs_root *reloc_root;
6898 struct extent_buffer *eb = NULL;
6899 struct btrfs_key *keys;
6900 u64 *nodes;
6901 int level;
f82d02d9 6902 int shared_level;
1a40e23b 6903 int lowest_level = 0;
1a40e23b
ZY
6904 int ret;
6905
6906 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6907 lowest_level = ref_path->owner_objectid;
6908
f82d02d9 6909 if (!root->ref_cows) {
1a40e23b
ZY
6910 path->lowest_level = lowest_level;
6911 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6912 BUG_ON(ret < 0);
6913 path->lowest_level = 0;
6914 btrfs_release_path(root, path);
6915 return 0;
6916 }
6917
1a40e23b
ZY
6918 mutex_lock(&root->fs_info->tree_reloc_mutex);
6919 ret = init_reloc_tree(trans, root);
6920 BUG_ON(ret);
6921 reloc_root = root->reloc_root;
6922
f82d02d9
YZ
6923 shared_level = ref_path->shared_level;
6924 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
1a40e23b 6925
f82d02d9
YZ
6926 keys = ref_path->node_keys;
6927 nodes = ref_path->new_nodes;
6928 memset(&keys[shared_level + 1], 0,
6929 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6930 memset(&nodes[shared_level + 1], 0,
6931 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
1a40e23b 6932
f82d02d9
YZ
6933 if (nodes[lowest_level] == 0) {
6934 path->lowest_level = lowest_level;
6935 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6936 0, 1);
6937 BUG_ON(ret);
6938 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6939 eb = path->nodes[level];
6940 if (!eb || eb == reloc_root->node)
6941 break;
6942 nodes[level] = eb->start;
6943 if (level == 0)
6944 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6945 else
6946 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6947 }
2b82032c
YZ
6948 if (nodes[0] &&
6949 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
f82d02d9
YZ
6950 eb = path->nodes[0];
6951 ret = replace_extents_in_leaf(trans, reloc_root, eb,
6952 group, reloc_inode);
6953 BUG_ON(ret);
6954 }
6955 btrfs_release_path(reloc_root, path);
6956 } else {
1a40e23b 6957 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
f82d02d9 6958 lowest_level);
1a40e23b
ZY
6959 BUG_ON(ret);
6960 }
6961
1a40e23b
ZY
6962 /*
6963 * replace tree blocks in the fs tree with tree blocks in
6964 * the reloc tree.
6965 */
6966 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6967 BUG_ON(ret < 0);
6968
6969 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
f82d02d9
YZ
6970 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6971 0, 0);
6972 BUG_ON(ret);
6973 extent_buffer_get(path->nodes[0]);
6974 eb = path->nodes[0];
6975 btrfs_release_path(reloc_root, path);
1a40e23b
ZY
6976 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6977 BUG_ON(ret);
6978 free_extent_buffer(eb);
6979 }
1a40e23b 6980
f82d02d9 6981 mutex_unlock(&root->fs_info->tree_reloc_mutex);
1a40e23b 6982 path->lowest_level = 0;
1a40e23b
ZY
6983 return 0;
6984}
6985
d397712b 6986static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6987 struct btrfs_root *root,
6988 struct btrfs_path *path,
6989 struct btrfs_key *first_key,
6990 struct btrfs_ref_path *ref_path)
6991{
6992 int ret;
1a40e23b
ZY
6993
6994 ret = relocate_one_path(trans, root, path, first_key,
6995 ref_path, NULL, NULL);
6996 BUG_ON(ret);
6997
1a40e23b
ZY
6998 return 0;
6999}
7000
d397712b 7001static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
1a40e23b
ZY
7002 struct btrfs_root *extent_root,
7003 struct btrfs_path *path,
7004 struct btrfs_key *extent_key)
7005{
7006 int ret;
7007
1a40e23b
ZY
7008 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7009 if (ret)
7010 goto out;
7011 ret = btrfs_del_item(trans, extent_root, path);
7012out:
7013 btrfs_release_path(extent_root, path);
1a40e23b
ZY
7014 return ret;
7015}
7016
d397712b 7017static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
1a40e23b
ZY
7018 struct btrfs_ref_path *ref_path)
7019{
7020 struct btrfs_key root_key;
7021
7022 root_key.objectid = ref_path->root_objectid;
7023 root_key.type = BTRFS_ROOT_ITEM_KEY;
7024 if (is_cowonly_root(ref_path->root_objectid))
7025 root_key.offset = 0;
7026 else
7027 root_key.offset = (u64)-1;
7028
7029 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7030}
7031
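/*
 * relocate all references to a single extent of the block group
 * being relocated.  pass 0 only copies the file data into the reloc
 * inode (relocate_data_extent); later passes update the references:
 * data refs go through the per-subvol reloc trees when possible
 * (relocate_one_path), fall back to rewriting the file extent items
 * in place (replace_one_extent), and tree block refs are handled by
 * relocate_tree_block().
 */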
d397712b 7032static noinline int relocate_one_extent(struct btrfs_root *extent_root,
1a40e23b
ZY
7033 struct btrfs_path *path,
7034 struct btrfs_key *extent_key,
7035 struct btrfs_block_group_cache *group,
7036 struct inode *reloc_inode, int pass)
7037{
7038 struct btrfs_trans_handle *trans;
7039 struct btrfs_root *found_root;
7040 struct btrfs_ref_path *ref_path = NULL;
7041 struct disk_extent *new_extents = NULL;
7042 int nr_extents = 0;
7043 int loops;
7044 int ret;
7045 int level;
7046 struct btrfs_key first_key;
7047 u64 prev_block = 0;
7048
1a40e23b
ZY
7049
7050 trans = btrfs_start_transaction(extent_root, 1);
7051 BUG_ON(!trans);
7052
7053 if (extent_key->objectid == 0) {
7054 ret = del_extent_zero(trans, extent_root, path, extent_key);
7055 goto out;
7056 }
7057
7058 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7059 if (!ref_path) {
d397712b
CM
7060 ret = -ENOMEM;
7061 goto out;
1a40e23b
ZY
7062 }
7063
7064 for (loops = 0; ; loops++) {
7065 if (loops == 0) {
7066 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7067 extent_key->objectid);
7068 } else {
7069 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7070 }
7071 if (ret < 0)
7072 goto out;
7073 if (ret > 0)
7074 break;
7075
7076 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7077 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7078 continue;
7079
7080 found_root = read_ref_root(extent_root->fs_info, ref_path);
7081 BUG_ON(!found_root);
7082 /*
 7084 * for reference counted trees, only process reference paths
7084 * rooted at the latest committed root.
7085 */
7086 if (found_root->ref_cows &&
7087 ref_path->root_generation != found_root->root_key.offset)
7088 continue;
7089
7090 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7091 if (pass == 0) {
7092 /*
7093 * copy data extents to new locations
7094 */
7095 u64 group_start = group->key.objectid;
7096 ret = relocate_data_extent(reloc_inode,
7097 extent_key,
7098 group_start);
7099 if (ret < 0)
7100 goto out;
7101 break;
7102 }
7103 level = 0;
7104 } else {
7105 level = ref_path->owner_objectid;
7106 }
7107
7108 if (prev_block != ref_path->nodes[level]) {
7109 struct extent_buffer *eb;
7110 u64 block_start = ref_path->nodes[level];
7111 u64 block_size = btrfs_level_size(found_root, level);
7112
7113 eb = read_tree_block(found_root, block_start,
7114 block_size, 0);
7115 btrfs_tree_lock(eb);
7116 BUG_ON(level != btrfs_header_level(eb));
7117
7118 if (level == 0)
7119 btrfs_item_key_to_cpu(eb, &first_key, 0);
7120 else
7121 btrfs_node_key_to_cpu(eb, &first_key, 0);
7122
7123 btrfs_tree_unlock(eb);
7124 free_extent_buffer(eb);
7125 prev_block = block_start;
7126 }
7127
24562425 7128 mutex_lock(&extent_root->fs_info->trans_mutex);
e4404d6e 7129 btrfs_record_root_in_trans(found_root);
24562425 7130 mutex_unlock(&extent_root->fs_info->trans_mutex);
e4404d6e
YZ
7131 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7132 /*
7133 * try to update data extent references while
7134 * keeping metadata shared between snapshots.
7135 */
7136 if (pass == 1) {
7137 ret = relocate_one_path(trans, found_root,
7138 path, &first_key, ref_path,
7139 group, reloc_inode);
7140 if (ret < 0)
7141 goto out;
7142 continue;
7143 }
1a40e23b
ZY
7144 /*
7145 * use fallback method to process the remaining
7146 * references.
7147 */
7148 if (!new_extents) {
7149 u64 group_start = group->key.objectid;
d899e052
YZ
 7150 new_extents = kmalloc(sizeof(*new_extents),
 7151 GFP_NOFS);
 if (!new_extents) {
 ret = -ENOMEM;
 goto out;
 }
 7152 nr_extents = 1;
1a40e23b
ZY
7153 ret = get_new_locations(reloc_inode,
7154 extent_key,
d899e052 7155 group_start, 1,
1a40e23b
ZY
7156 &new_extents,
7157 &nr_extents);
d899e052 7158 if (ret)
1a40e23b
ZY
7159 goto out;
7160 }
1a40e23b
ZY
7161 ret = replace_one_extent(trans, found_root,
7162 path, extent_key,
7163 &first_key, ref_path,
7164 new_extents, nr_extents);
e4404d6e 7165 } else {
1a40e23b
ZY
7166 ret = relocate_tree_block(trans, found_root, path,
7167 &first_key, ref_path);
1a40e23b
ZY
7168 }
7169 if (ret < 0)
7170 goto out;
7171 }
7172 ret = 0;
7173out:
7174 btrfs_end_transaction(trans, extent_root);
7175 kfree(new_extents);
7176 kfree(ref_path);
1a40e23b
ZY
7177 return ret;
7178}
5d4f98a2 7179#endif
1a40e23b 7180
ec44a35c
CM
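/*
 * map a block group's allocation profile onto the current number of
 * writable devices: with a single device mirroring degrades to DUP and
 * raid0 to single, with multiple devices DUP becomes raid1 and single
 * chunks become raid0.
 */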
7181static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7182{
7183 u64 num_devices;
7184 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7185 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7186
2b82032c 7187 num_devices = root->fs_info->fs_devices->rw_devices;
ec44a35c
CM
7188 if (num_devices == 1) {
7189 stripped |= BTRFS_BLOCK_GROUP_DUP;
7190 stripped = flags & ~stripped;
7191
7192 /* turn raid0 into single device chunks */
7193 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7194 return stripped;
7195
7196 /* turn mirroring into duplication */
7197 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7198 BTRFS_BLOCK_GROUP_RAID10))
7199 return stripped | BTRFS_BLOCK_GROUP_DUP;
7200 return flags;
7201 } else {
7202 /* they already had raid on here, just return */
ec44a35c
CM
7203 if (flags & stripped)
7204 return flags;
7205
7206 stripped |= BTRFS_BLOCK_GROUP_DUP;
7207 stripped = flags & ~stripped;
7208
7209 /* switch duplicated blocks with raid1 */
7210 if (flags & BTRFS_BLOCK_GROUP_DUP)
7211 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7212
7213 /* turn single device chunks into raid0 */
7214 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7215 }
7216 return flags;
7217}
7218
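/*
 * make sure there is room to move the contents of @shrink_block_group:
 * if the group still has used or reserved bytes, allocate a new chunk
 * (with the profile adjusted for the current device count) big enough
 * to hold them.
 */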
b2950863 7219static int __alloc_chunk_for_shrink(struct btrfs_root *root,
0ef3e66b
CM
7220 struct btrfs_block_group_cache *shrink_block_group,
7221 int force)
7222{
7223 struct btrfs_trans_handle *trans;
7224 u64 new_alloc_flags;
7225 u64 calc;
7226
c286ac48 7227 spin_lock(&shrink_block_group->lock);
5d4f98a2
YZ
7228 if (btrfs_block_group_used(&shrink_block_group->item) +
7229 shrink_block_group->reserved > 0) {
c286ac48 7230 spin_unlock(&shrink_block_group->lock);
c286ac48 7231
0ef3e66b 7232 trans = btrfs_start_transaction(root, 1);
c286ac48 7233 spin_lock(&shrink_block_group->lock);
7d9eb12c 7234
0ef3e66b
CM
7235 new_alloc_flags = update_block_group_flags(root,
7236 shrink_block_group->flags);
7237 if (new_alloc_flags != shrink_block_group->flags) {
7238 calc =
7239 btrfs_block_group_used(&shrink_block_group->item);
7240 } else {
7241 calc = shrink_block_group->key.offset;
7242 }
c286ac48
CM
7243 spin_unlock(&shrink_block_group->lock);
7244
0ef3e66b
CM
7245 do_chunk_alloc(trans, root->fs_info->extent_root,
7246 calc + 2 * 1024 * 1024, new_alloc_flags, force);
7d9eb12c 7247
0ef3e66b 7248 btrfs_end_transaction(trans, root);
c286ac48
CM
7249 } else
7250 spin_unlock(&shrink_block_group->lock);
0ef3e66b
CM
7251 return 0;
7252}
7253
5d4f98a2
YZ
7254
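/*
 * pre-allocate a replacement chunk and mark the group read-only so no
 * new allocations land in it while it is being relocated.
 */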
7255int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
7256 struct btrfs_block_group_cache *group)
7257
7258{
7259 __alloc_chunk_for_shrink(root, group, 1);
7260 set_block_group_readonly(group);
7261 return 0;
7262}
7263
ba1bf481
JB
7264/*
 7265 * checks to see if it's even possible to relocate this block group.
7266 *
 7267 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
 7268 * ok to go ahead and try.
7269 */
7270int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
1a40e23b 7271{
ba1bf481
JB
7272 struct btrfs_block_group_cache *block_group;
7273 struct btrfs_space_info *space_info;
7274 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7275 struct btrfs_device *device;
7276 int full = 0;
7277 int ret = 0;
1a40e23b 7278
ba1bf481 7279 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1a40e23b 7280
ba1bf481
JB
7281 /* odd, couldn't find the block group, leave it alone */
7282 if (!block_group)
7283 return -1;
1a40e23b 7284
ba1bf481
JB
7285 /* no bytes used, we're good */
7286 if (!btrfs_block_group_used(&block_group->item))
1a40e23b
ZY
7287 goto out;
7288
ba1bf481
JB
7289 space_info = block_group->space_info;
7290 spin_lock(&space_info->lock);
17d217fe 7291
ba1bf481 7292 full = space_info->full;
17d217fe 7293
ba1bf481
JB
7294 /*
7295 * if this is the last block group we have in this space, we can't
7ce618db
CM
7296 * relocate it unless we're able to allocate a new chunk below.
7297 *
7298 * Otherwise, we need to make sure we have room in the space to handle
7299 * all of the extents from this block group. If we can, we're good
ba1bf481 7300 */
7ce618db
CM
7301 if ((space_info->total_bytes != block_group->key.offset) &&
7302 (space_info->bytes_used + space_info->bytes_reserved +
ba1bf481
JB
7303 space_info->bytes_pinned + space_info->bytes_readonly +
7304 btrfs_block_group_used(&block_group->item) <
7ce618db 7305 space_info->total_bytes)) {
ba1bf481
JB
7306 spin_unlock(&space_info->lock);
7307 goto out;
17d217fe 7308 }
ba1bf481 7309 spin_unlock(&space_info->lock);
ea8c2819 7310
ba1bf481
JB
7311 /*
7312 * ok we don't have enough space, but maybe we have free space on our
7313 * devices to allocate new chunks for relocation, so loop through our
7314 * alloc devices and guess if we have enough space. However, if we
7315 * were marked as full, then we know there aren't enough chunks, and we
7316 * can just return.
7317 */
7318 ret = -1;
7319 if (full)
7320 goto out;
ea8c2819 7321
ba1bf481
JB
7322 mutex_lock(&root->fs_info->chunk_mutex);
7323 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7324 u64 min_free = btrfs_block_group_used(&block_group->item);
7325 u64 dev_offset, max_avail;
56bec294 7326
ba1bf481
JB
7327 /*
7328 * check to make sure we can actually find a chunk with enough
7329 * space to fit our block group in.
7330 */
7331 if (device->total_bytes > device->bytes_used + min_free) {
7332 ret = find_free_dev_extent(NULL, device, min_free,
7333 &dev_offset, &max_avail);
7334 if (!ret)
73e48b27 7335 break;
ba1bf481 7336 ret = -1;
725c8463 7337 }
edbd8d4e 7338 }
ba1bf481 7339 mutex_unlock(&root->fs_info->chunk_mutex);
edbd8d4e 7340out:
ba1bf481 7341 btrfs_put_block_group(block_group);
edbd8d4e
CM
7342 return ret;
7343}
7344
b2950863
CH
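/*
 * position @path at the first BLOCK_GROUP_ITEM with objectid >=
 * key->objectid; returns 0 when found, > 0 when the tree is exhausted
 * and < 0 on error.
 */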
7345static int find_first_block_group(struct btrfs_root *root,
7346 struct btrfs_path *path, struct btrfs_key *key)
0b86a832 7347{
925baedd 7348 int ret = 0;
0b86a832
CM
7349 struct btrfs_key found_key;
7350 struct extent_buffer *leaf;
7351 int slot;
edbd8d4e 7352
0b86a832
CM
7353 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7354 if (ret < 0)
925baedd
CM
7355 goto out;
7356
d397712b 7357 while (1) {
0b86a832 7358 slot = path->slots[0];
edbd8d4e 7359 leaf = path->nodes[0];
0b86a832
CM
7360 if (slot >= btrfs_header_nritems(leaf)) {
7361 ret = btrfs_next_leaf(root, path);
7362 if (ret == 0)
7363 continue;
7364 if (ret < 0)
925baedd 7365 goto out;
0b86a832 7366 break;
edbd8d4e 7367 }
0b86a832 7368 btrfs_item_key_to_cpu(leaf, &found_key, slot);
edbd8d4e 7369
0b86a832 7370 if (found_key.objectid >= key->objectid &&
925baedd
CM
7371 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7372 ret = 0;
7373 goto out;
7374 }
0b86a832 7375 path->slots[0]++;
edbd8d4e 7376 }
925baedd 7377out:
0b86a832 7378 return ret;
edbd8d4e
CM
7379}
7380
1a40e23b
ZY
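/*
 * called during unmount to tear down all in-memory block group and
 * space_info structures: drop pending caching controls, unlink every
 * block group from the cache tree and its space_info, wait for
 * in-flight caching, free the free space caches and finally the
 * space_info list.
 */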
7381int btrfs_free_block_groups(struct btrfs_fs_info *info)
7382{
7383 struct btrfs_block_group_cache *block_group;
4184ea7f 7384 struct btrfs_space_info *space_info;
11833d66 7385 struct btrfs_caching_control *caching_ctl;
1a40e23b
ZY
7386 struct rb_node *n;
7387
11833d66
YZ
7388 down_write(&info->extent_commit_sem);
7389 while (!list_empty(&info->caching_block_groups)) {
7390 caching_ctl = list_entry(info->caching_block_groups.next,
7391 struct btrfs_caching_control, list);
7392 list_del(&caching_ctl->list);
7393 put_caching_control(caching_ctl);
7394 }
7395 up_write(&info->extent_commit_sem);
7396
1a40e23b
ZY
7397 spin_lock(&info->block_group_cache_lock);
7398 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7399 block_group = rb_entry(n, struct btrfs_block_group_cache,
7400 cache_node);
1a40e23b
ZY
7401 rb_erase(&block_group->cache_node,
7402 &info->block_group_cache_tree);
d899e052
YZ
7403 spin_unlock(&info->block_group_cache_lock);
7404
80eb234a 7405 down_write(&block_group->space_info->groups_sem);
1a40e23b 7406 list_del(&block_group->list);
80eb234a 7407 up_write(&block_group->space_info->groups_sem);
d2fb3437 7408
817d52f8 7409 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7410 wait_block_group_cache_done(block_group);
817d52f8
JB
7411
7412 btrfs_remove_free_space_cache(block_group);
11dfe35a 7413 btrfs_put_block_group(block_group);
d899e052
YZ
7414
7415 spin_lock(&info->block_group_cache_lock);
1a40e23b
ZY
7416 }
7417 spin_unlock(&info->block_group_cache_lock);
4184ea7f
CM
7418
7419 /* now that all the block groups are freed, go through and
7420 * free all the space_info structs. This is only called during
7421 * the final stages of unmount, and so we know nobody is
7422 * using them. We call synchronize_rcu() once before we start,
7423 * just to be on the safe side.
7424 */
7425 synchronize_rcu();
7426
 7427 while (!list_empty(&info->space_info)) {
7428 space_info = list_entry(info->space_info.next,
7429 struct btrfs_space_info,
7430 list);
7431
7432 list_del(&space_info->list);
7433 kfree(space_info);
7434 }
1a40e23b
ZY
7435 return 0;
7436}
7437
b742bb82
YZ
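/*
 * add the block group to the list for its raid profile inside the
 * space_info, protected by groups_sem.
 */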
7438static void __link_block_group(struct btrfs_space_info *space_info,
7439 struct btrfs_block_group_cache *cache)
7440{
7441 int index = get_block_group_index(cache);
7442
7443 down_write(&space_info->groups_sem);
7444 list_add_tail(&cache->list, &space_info->block_groups[index]);
7445 up_write(&space_info->groups_sem);
7446}
7447
9078a3e1
CM
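/*
 * build the in-memory block group cache at mount time: walk every
 * BLOCK_GROUP_ITEM in the extent tree, create a cache struct for it,
 * pre-populate free space for completely full or empty groups, link it
 * into its space_info and mark un-mirrored groups read-only when
 * mirrored profiles are available.
 */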
7448int btrfs_read_block_groups(struct btrfs_root *root)
7449{
7450 struct btrfs_path *path;
7451 int ret;
9078a3e1 7452 struct btrfs_block_group_cache *cache;
be744175 7453 struct btrfs_fs_info *info = root->fs_info;
6324fbf3 7454 struct btrfs_space_info *space_info;
9078a3e1
CM
7455 struct btrfs_key key;
7456 struct btrfs_key found_key;
5f39d397 7457 struct extent_buffer *leaf;
96b5179d 7458
be744175 7459 root = info->extent_root;
9078a3e1 7460 key.objectid = 0;
0b86a832 7461 key.offset = 0;
9078a3e1 7462 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
9078a3e1
CM
7463 path = btrfs_alloc_path();
7464 if (!path)
7465 return -ENOMEM;
7466
d397712b 7467 while (1) {
0b86a832 7468 ret = find_first_block_group(root, path, &key);
b742bb82
YZ
7469 if (ret > 0)
7470 break;
0b86a832
CM
7471 if (ret != 0)
7472 goto error;
7473
5f39d397
CM
7474 leaf = path->nodes[0];
7475 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8f18cf13 7476 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9078a3e1 7477 if (!cache) {
0b86a832 7478 ret = -ENOMEM;
9078a3e1
CM
7479 break;
7480 }
3e1ad54f 7481
d2fb3437 7482 atomic_set(&cache->count, 1);
c286ac48 7483 spin_lock_init(&cache->lock);
6226cb0a 7484 spin_lock_init(&cache->tree_lock);
817d52f8 7485 cache->fs_info = info;
0f9dd46c 7486 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7487 INIT_LIST_HEAD(&cache->cluster_list);
96303081
JB
7488
7489 /*
7490 * we only want to have 32k of ram per block group for keeping
7491 * track of free space, and if we pass 1/2 of that we want to
7492 * start converting things over to using bitmaps
7493 */
7494 cache->extents_thresh = ((1024 * 32) / 2) /
7495 sizeof(struct btrfs_free_space);
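 /* i.e. switch to bitmaps after 16k / sizeof(struct btrfs_free_space) entries */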
7496
5f39d397
CM
7497 read_extent_buffer(leaf, &cache->item,
7498 btrfs_item_ptr_offset(leaf, path->slots[0]),
7499 sizeof(cache->item));
9078a3e1 7500 memcpy(&cache->key, &found_key, sizeof(found_key));
0b86a832 7501
9078a3e1
CM
7502 key.objectid = found_key.objectid + found_key.offset;
7503 btrfs_release_path(root, path);
0b86a832 7504 cache->flags = btrfs_block_group_flags(&cache->item);
817d52f8
JB
7505 cache->sectorsize = root->sectorsize;
7506
817d52f8
JB
7507 /*
7508 * check for two cases, either we are full, and therefore
7509 * don't need to bother with the caching work since we won't
7510 * find any space, or we are empty, and we can just add all
 7511 * the space in and be done with it. This saves us a lot of
7512 * time, particularly in the full case.
7513 */
7514 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
1b2da372 7515 exclude_super_stripes(root, cache);
11833d66 7516 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7517 cache->cached = BTRFS_CACHE_FINISHED;
1b2da372 7518 free_excluded_extents(root, cache);
817d52f8 7519 } else if (btrfs_block_group_used(&cache->item) == 0) {
11833d66
YZ
7520 exclude_super_stripes(root, cache);
7521 cache->last_byte_to_unpin = (u64)-1;
817d52f8
JB
7522 cache->cached = BTRFS_CACHE_FINISHED;
7523 add_new_free_space(cache, root->fs_info,
7524 found_key.objectid,
7525 found_key.objectid +
7526 found_key.offset);
11833d66 7527 free_excluded_extents(root, cache);
817d52f8 7528 }
96b5179d 7529
6324fbf3
CM
7530 ret = update_space_info(info, cache->flags, found_key.offset,
7531 btrfs_block_group_used(&cache->item),
7532 &space_info);
7533 BUG_ON(ret);
7534 cache->space_info = space_info;
1b2da372
JB
7535 spin_lock(&cache->space_info->lock);
7536 cache->space_info->bytes_super += cache->bytes_super;
7537 spin_unlock(&cache->space_info->lock);
7538
b742bb82 7539 __link_block_group(space_info, cache);
0f9dd46c
JB
7540
7541 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7542 BUG_ON(ret);
75ccf47d
CM
7543
7544 set_avail_alloc_bits(root->fs_info, cache->flags);
2b82032c
YZ
7545 if (btrfs_chunk_readonly(root, cache->key.objectid))
7546 set_block_group_readonly(cache);
9078a3e1 7547 }
b742bb82
YZ
7548
7549 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
7550 if (!(get_alloc_profile(root, space_info->flags) &
7551 (BTRFS_BLOCK_GROUP_RAID10 |
7552 BTRFS_BLOCK_GROUP_RAID1 |
7553 BTRFS_BLOCK_GROUP_DUP)))
7554 continue;
7555 /*
 7556 * avoid allocating from un-mirrored block groups if there are
7557 * mirrored block groups.
7558 */
7559 list_for_each_entry(cache, &space_info->block_groups[3], list)
7560 set_block_group_readonly(cache);
7561 list_for_each_entry(cache, &space_info->block_groups[4], list)
7562 set_block_group_readonly(cache);
7563 }
0b86a832
CM
7564 ret = 0;
7565error:
9078a3e1 7566 btrfs_free_path(path);
0b86a832 7567 return ret;
9078a3e1 7568}
6324fbf3
CM
7569
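/*
 * create the in-memory and on-disk records for a newly allocated chunk:
 * set up the block group cache, record its free space, link it into its
 * space_info and the cache tree, and insert the BLOCK_GROUP_ITEM into
 * the extent tree.
 */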
7570int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7571 struct btrfs_root *root, u64 bytes_used,
e17cade2 7572 u64 type, u64 chunk_objectid, u64 chunk_offset,
6324fbf3
CM
7573 u64 size)
7574{
7575 int ret;
6324fbf3
CM
7576 struct btrfs_root *extent_root;
7577 struct btrfs_block_group_cache *cache;
6324fbf3
CM
7578
7579 extent_root = root->fs_info->extent_root;
6324fbf3 7580
12fcfd22 7581 root->fs_info->last_trans_log_full_commit = trans->transid;
e02119d5 7582
8f18cf13 7583 cache = kzalloc(sizeof(*cache), GFP_NOFS);
0f9dd46c
JB
7584 if (!cache)
7585 return -ENOMEM;
7586
e17cade2 7587 cache->key.objectid = chunk_offset;
6324fbf3 7588 cache->key.offset = size;
d2fb3437 7589 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
96303081
JB
7590 cache->sectorsize = root->sectorsize;
7591
7592 /*
7593 * we only want to have 32k of ram per block group for keeping track
7594 * of free space, and if we pass 1/2 of that we want to start
7595 * converting things over to using bitmaps
7596 */
7597 cache->extents_thresh = ((1024 * 32) / 2) /
7598 sizeof(struct btrfs_free_space);
d2fb3437 7599 atomic_set(&cache->count, 1);
c286ac48 7600 spin_lock_init(&cache->lock);
6226cb0a 7601 spin_lock_init(&cache->tree_lock);
0f9dd46c 7602 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7603 INIT_LIST_HEAD(&cache->cluster_list);
0ef3e66b 7604
6324fbf3 7605 btrfs_set_block_group_used(&cache->item, bytes_used);
6324fbf3
CM
7606 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7607 cache->flags = type;
7608 btrfs_set_block_group_flags(&cache->item, type);
7609
11833d66 7610 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7611 cache->cached = BTRFS_CACHE_FINISHED;
11833d66 7612 exclude_super_stripes(root, cache);
96303081 7613
817d52f8
JB
7614 add_new_free_space(cache, root->fs_info, chunk_offset,
7615 chunk_offset + size);
7616
11833d66
YZ
7617 free_excluded_extents(root, cache);
7618
6324fbf3
CM
7619 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7620 &cache->space_info);
7621 BUG_ON(ret);
1b2da372
JB
7622
7623 spin_lock(&cache->space_info->lock);
7624 cache->space_info->bytes_super += cache->bytes_super;
7625 spin_unlock(&cache->space_info->lock);
7626
b742bb82 7627 __link_block_group(cache->space_info, cache);
6324fbf3 7628
0f9dd46c
JB
7629 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7630 BUG_ON(ret);
c286ac48 7631
6324fbf3
CM
7632 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7633 sizeof(cache->item));
7634 BUG_ON(ret);
7635
d18a2c44 7636 set_avail_alloc_bits(extent_root->fs_info, type);
925baedd 7637
6324fbf3
CM
7638 return 0;
7639}
1a40e23b
ZY
7640
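/*
 * remove an empty, read-only block group: detach it from the allocation
 * clusters, unlink it from the cache tree and its space_info, wait for
 * caching to finish, drop its free space cache and references, fix up
 * the space_info accounting and delete its item from the extent tree.
 */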
7641int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7642 struct btrfs_root *root, u64 group_start)
7643{
7644 struct btrfs_path *path;
7645 struct btrfs_block_group_cache *block_group;
44fb5511 7646 struct btrfs_free_cluster *cluster;
1a40e23b
ZY
7647 struct btrfs_key key;
7648 int ret;
7649
1a40e23b
ZY
7650 root = root->fs_info->extent_root;
7651
7652 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7653 BUG_ON(!block_group);
c146afad 7654 BUG_ON(!block_group->ro);
1a40e23b
ZY
7655
7656 memcpy(&key, &block_group->key, sizeof(key));
7657
44fb5511
CM
7658 /* make sure this block group isn't part of an allocation cluster */
7659 cluster = &root->fs_info->data_alloc_cluster;
7660 spin_lock(&cluster->refill_lock);
7661 btrfs_return_cluster_to_free_space(block_group, cluster);
7662 spin_unlock(&cluster->refill_lock);
7663
7664 /*
7665 * make sure this block group isn't part of a metadata
7666 * allocation cluster
7667 */
7668 cluster = &root->fs_info->meta_alloc_cluster;
7669 spin_lock(&cluster->refill_lock);
7670 btrfs_return_cluster_to_free_space(block_group, cluster);
7671 spin_unlock(&cluster->refill_lock);
7672
1a40e23b
ZY
7673 path = btrfs_alloc_path();
7674 BUG_ON(!path);
7675
3dfdb934 7676 spin_lock(&root->fs_info->block_group_cache_lock);
1a40e23b
ZY
7677 rb_erase(&block_group->cache_node,
7678 &root->fs_info->block_group_cache_tree);
3dfdb934 7679 spin_unlock(&root->fs_info->block_group_cache_lock);
817d52f8 7680
80eb234a 7681 down_write(&block_group->space_info->groups_sem);
44fb5511
CM
7682 /*
7683 * we must use list_del_init so people can check to see if they
7684 * are still on the list after taking the semaphore
7685 */
7686 list_del_init(&block_group->list);
80eb234a 7687 up_write(&block_group->space_info->groups_sem);
1a40e23b 7688
817d52f8 7689 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7690 wait_block_group_cache_done(block_group);
817d52f8
JB
7691
7692 btrfs_remove_free_space_cache(block_group);
7693
c146afad
YZ
7694 spin_lock(&block_group->space_info->lock);
7695 block_group->space_info->total_bytes -= block_group->key.offset;
7696 block_group->space_info->bytes_readonly -= block_group->key.offset;
7697 spin_unlock(&block_group->space_info->lock);
283bb197
CM
7698
7699 btrfs_clear_space_info_full(root->fs_info);
c146afad 7700
fa9c0d79
CM
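 /* once for the lookup reference, once for the reference the cache tree held */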
7701 btrfs_put_block_group(block_group);
7702 btrfs_put_block_group(block_group);
1a40e23b
ZY
7703
7704 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7705 if (ret > 0)
7706 ret = -EIO;
7707 if (ret < 0)
7708 goto out;
7709
7710 ret = btrfs_del_item(trans, root, path);
7711out:
7712 btrfs_free_path(path);
7713 return ret;
7714}