fs/btrfs/extent-tree.c (deliverable/linux.git, commit 74cfbee2ff336648a621fe00017f343846fd0445)
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "print-tree.h"
23 #include "transaction.h"
24
25 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
26 btrfs_root *extent_root);
27 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
28 btrfs_root *extent_root);
29
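/*
 * scan the extent tree for every extent item inside this block group and
 * record the gaps between them in the per-fs free_space_cache (dirty bits
 * in that tree mark free space).  Runs once per group; later calls return
 * immediately once ->cached is set.
 */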
30 static int cache_block_group(struct btrfs_root *root,
31 struct btrfs_block_group_cache *block_group)
32 {
33 struct btrfs_path *path;
34 int ret;
35 struct btrfs_key key;
36 struct extent_buffer *leaf;
37 struct extent_map_tree *free_space_cache;
38 int slot;
39 u64 last = 0;
40 u64 hole_size;
41 u64 first_free;
42 int found = 0;
43
44 root = root->fs_info->extent_root;
45 free_space_cache = &root->fs_info->free_space_cache;
46
47 if (block_group->cached)
48 return 0;
49
50 path = btrfs_alloc_path();
51 if (!path)
52 return -ENOMEM;
53
54 path->reada = 2;
55 first_free = block_group->key.objectid;
56 key.objectid = block_group->key.objectid;
57 key.offset = 0;
58
59 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
60 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
61
62 if (ret < 0) {
63 btrfs_free_path(path);
64 return ret;
}

65 if (ret && path->slots[0] > 0)
66 path->slots[0]--;
67
68 while(1) {
69 leaf = path->nodes[0];
70 slot = path->slots[0];
71 if (slot >= btrfs_header_nritems(leaf)) {
72 ret = btrfs_next_leaf(root, path);
73 if (ret < 0)
74 goto err;
75 if (ret == 0) {
76 continue;
77 } else {
78 break;
79 }
80 }
81
82 btrfs_item_key_to_cpu(leaf, &key, slot);
83 if (key.objectid < block_group->key.objectid) {
84 if (key.objectid + key.offset > first_free)
85 first_free = key.objectid + key.offset;
86 goto next;
87 }
88
89 if (key.objectid >= block_group->key.objectid +
90 block_group->key.offset) {
91 break;
92 }
93
94 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
95 if (!found) {
96 last = first_free;
97 found = 1;
98 }
99 if (key.objectid > last) {
100 hole_size = key.objectid - last;
101 set_extent_dirty(free_space_cache, last,
102 last + hole_size - 1,
103 GFP_NOFS);
104 }
105 last = key.objectid + key.offset;
106 }
107 next:
108 path->slots[0]++;
109 }
110
111 if (!found)
112 last = first_free;
113 if (block_group->key.objectid +
114 block_group->key.offset > last) {
115 hole_size = block_group->key.objectid +
116 block_group->key.offset - last;
117 set_extent_dirty(free_space_cache, last,
118 last + hole_size - 1, GFP_NOFS);
119 }
120 block_group->cached = 1;
121 err:
122 btrfs_free_path(path);
123 return 0;
124 }
125
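/*
 * return the block group cache that contains the given block number,
 * checking both the metadata and the data block group radix trees.
 * Returns NULL if no block group covers the block.
 */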
126 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
127 btrfs_fs_info *info,
128 u64 blocknr)
129 {
130 struct btrfs_block_group_cache *block_group;
131 int ret;
132
133 ret = radix_tree_gang_lookup(&info->block_group_radix,
134 (void **)&block_group,
135 blocknr, 1);
136 if (ret) {
137 if (block_group->key.objectid <= blocknr && blocknr <=
138 block_group->key.objectid + block_group->key.offset)
139 return block_group;
140 }
141 ret = radix_tree_gang_lookup(&info->block_group_data_radix,
142 (void **)&block_group,
143 blocknr, 1);
144 if (ret) {
145 if (block_group->key.objectid <= blocknr && blocknr <=
146 block_group->key.objectid + block_group->key.offset)
147 return block_group;
148 }
149 return NULL;
150 }
151
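/*
 * walk the free space cache starting at search_start and return the start
 * of the first free run that is at least num blocks long inside the
 * current block group.  When the group runs out of room, *cache_ret is
 * moved to another group and the search restarts; on failure the group's
 * last_alloc is used as a fallback hint.
 */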
152 static u64 find_search_start(struct btrfs_root *root,
153 struct btrfs_block_group_cache **cache_ret,
154 u64 search_start, int num, int data)
155 {
156 int ret;
157 struct btrfs_block_group_cache *cache = *cache_ret;
158 u64 last = max(search_start, cache->key.objectid);
159 u64 start = 0;
160 u64 end = 0;
161
162 again:
163 ret = cache_block_group(root, cache);
164 if (ret)
165 goto out;
166 while(1) {
167 ret = find_first_extent_bit(&root->fs_info->free_space_cache,
168 last, &start, &end, EXTENT_DIRTY);
169 if (ret)
170 goto out;
171
172 start = max(last, start);
173 last = end + 1;
174 if (end + 1 - start < num)
175 continue;
176 if (start + num > cache->key.objectid + cache->key.offset)
177 goto new_group;
178 return start;
179 }
180 out:
181 return max(cache->last_alloc, search_start);
182
183 new_group:
184 cache = btrfs_lookup_block_group(root->fs_info,
185 last + cache->key.offset - 1);
186 if (!cache) {
187 return max((*cache_ret)->last_alloc, search_start);
188 }
189 cache = btrfs_find_block_group(root, cache,
190 last + cache->key.offset - 1, data, 0);
191 *cache_ret = cache;
192 goto again;
193 }
194
195 static u64 div_factor(u64 num, int factor)
196 {
197 num *= factor;
198 do_div(num, 10);
199 return num;
200 }
201
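/*
 * choose a block group for an allocation.  Groups tagged as available and
 * below a usage threshold (see div_factor) are preferred, starting near
 * the hint or search_start.  If none qualify, the search widens to a full
 * scan and finally to the other radix (data vs metadata) before giving up.
 */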
202 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
203 struct btrfs_block_group_cache
204 *hint, u64 search_start,
205 int data, int owner)
206 {
207 struct btrfs_block_group_cache *cache[8];
208 struct btrfs_block_group_cache *found_group = NULL;
209 struct btrfs_fs_info *info = root->fs_info;
210 struct radix_tree_root *radix;
211 struct radix_tree_root *swap_radix;
212 u64 used;
213 u64 last = 0;
214 u64 hint_last;
215 int i;
216 int ret;
217 int full_search = 0;
218 int factor = 8;
219 int data_swap = 0;
220
221 if (!owner)
222 factor = 5;
223
224 if (data) {
225 radix = &info->block_group_data_radix;
226 swap_radix = &info->block_group_radix;
227 } else {
228 radix = &info->block_group_radix;
229 swap_radix = &info->block_group_data_radix;
230 }
231
232 if (search_start) {
233 struct btrfs_block_group_cache *shint;
234 shint = btrfs_lookup_block_group(info, search_start);
235 if (shint && shint->data == data) {
236 used = btrfs_block_group_used(&shint->item);
237 if (used + shint->pinned <
238 div_factor(shint->key.offset, factor)) {
239 return shint;
240 }
241 }
242 }
243 if (hint && hint->data == data) {
244 used = btrfs_block_group_used(&hint->item);
245 if (used + hint->pinned <
246 div_factor(hint->key.offset, factor)) {
247 return hint;
248 }
249 if (used >= div_factor(hint->key.offset, 8)) {
250 radix_tree_tag_clear(radix,
251 hint->key.objectid +
252 hint->key.offset - 1,
253 BTRFS_BLOCK_GROUP_AVAIL);
254 }
255 last = hint->key.offset * 3;
256 if (hint->key.objectid >= last)
257 last = max(search_start + hint->key.offset - 1,
258 hint->key.objectid - last);
259 else
260 last = hint->key.objectid + hint->key.offset;
261 hint_last = last;
262 } else {
263 if (hint)
264 hint_last = max(hint->key.objectid, search_start);
265 else
266 hint_last = search_start;
267
268 last = hint_last;
269 }
270 while(1) {
271 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
272 last, ARRAY_SIZE(cache),
273 BTRFS_BLOCK_GROUP_AVAIL);
274 if (!ret)
275 break;
276 for (i = 0; i < ret; i++) {
277 last = cache[i]->key.objectid +
278 cache[i]->key.offset;
279 used = btrfs_block_group_used(&cache[i]->item);
280 if (used + cache[i]->pinned <
281 div_factor(cache[i]->key.offset, factor)) {
282 found_group = cache[i];
283 goto found;
284 }
285 if (used >= div_factor(cache[i]->key.offset, 8)) {
286 radix_tree_tag_clear(radix,
287 cache[i]->key.objectid +
288 cache[i]->key.offset - 1,
289 BTRFS_BLOCK_GROUP_AVAIL);
290 }
291 }
292 cond_resched();
293 }
294 last = hint_last;
295 again:
296 while(1) {
297 ret = radix_tree_gang_lookup(radix, (void **)cache,
298 last, ARRAY_SIZE(cache));
299 if (!ret)
300 break;
301 for (i = 0; i < ret; i++) {
302 last = cache[i]->key.objectid +
303 cache[i]->key.offset;
304 used = btrfs_block_group_used(&cache[i]->item);
305 if (used + cache[i]->pinned < cache[i]->key.offset) {
306 found_group = cache[i];
307 goto found;
308 }
309 if (used >= cache[i]->key.offset) {
310 radix_tree_tag_clear(radix,
311 cache[i]->key.objectid +
312 cache[i]->key.offset - 1,
313 BTRFS_BLOCK_GROUP_AVAIL);
314 }
315 }
316 cond_resched();
317 }
318 if (!full_search) {
319 last = search_start;
320 full_search = 1;
321 goto again;
322 }
323 if (!data_swap) {
324 struct radix_tree_root *tmp = radix;
325 data_swap = 1;
326 radix = swap_radix;
327 swap_radix = tmp;
328 last = search_start;
329 goto again;
330 }
331 if (!found_group) {
332 ret = radix_tree_gang_lookup(radix,
333 (void **)&found_group, 0, 1);
334 if (ret == 0) {
335 ret = radix_tree_gang_lookup(swap_radix,
336 (void **)&found_group,
337 0, 1);
338 }
339 BUG_ON(ret != 1);
340 }
341 found:
342 return found_group;
343 }
344
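/*
 * bump the reference count on the extent item that describes the range
 * [blocknr, blocknr + num_blocks), then process any pending extent
 * inserts and deletes queued against the extent root.
 */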
345 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
346 struct btrfs_root *root,
347 u64 blocknr, u64 num_blocks)
348 {
349 struct btrfs_path *path;
350 int ret;
351 struct btrfs_key key;
352 struct extent_buffer *l;
353 struct btrfs_extent_item *item;
354 u32 refs;
355
356 path = btrfs_alloc_path();
357 if (!path)
358 return -ENOMEM;
359
360 key.objectid = blocknr;
361 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
362 key.offset = num_blocks;
363 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
364 0, 1);
365 if (ret < 0) {
366 btrfs_free_path(path);
367 return ret;
368 }
369 BUG_ON(ret != 0);
371 l = path->nodes[0];
372 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
373 refs = btrfs_extent_refs(l, item);
374 btrfs_set_extent_refs(l, item, refs + 1);
375 btrfs_mark_buffer_dirty(path->nodes[0]);
376
377 btrfs_release_path(root->fs_info->extent_root, path);
378 btrfs_free_path(path);
379 finish_current_insert(trans, root->fs_info->extent_root);
380 del_pending_extents(trans, root->fs_info->extent_root);
381 return 0;
382 }
383
384 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
385 struct btrfs_root *root)
386 {
387 finish_current_insert(trans, root->fs_info->extent_root);
388 del_pending_extents(trans, root->fs_info->extent_root);
389 return 0;
390 }
391
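/*
 * read the current reference count of the extent item for
 * [blocknr, blocknr + num_blocks) into *refs.  The item must exist.
 */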
392 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
393 struct btrfs_root *root, u64 blocknr,
394 u64 num_blocks, u32 *refs)
395 {
396 struct btrfs_path *path;
397 int ret;
398 struct btrfs_key key;
399 struct extent_buffer *l;
400 struct btrfs_extent_item *item;
401
402 path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
403 key.objectid = blocknr;
404 key.offset = num_blocks;
405 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
406 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
407 0, 0);
408 if (ret < 0)
409 goto out;
410 if (ret != 0) {
411 btrfs_print_leaf(root, path->nodes[0]);
412 printk("failed to find block number %Lu\n", blocknr);
413 BUG();
414 }
415 l = path->nodes[0];
416 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
417 *refs = btrfs_extent_refs(l, item);
418 out:
419 btrfs_free_path(path);
420 return ret;
421 }
422
423 int btrfs_inc_root_ref(struct btrfs_trans_handle *trans,
424 struct btrfs_root *root)
425 {
426 return btrfs_inc_extent_ref(trans, root,
427 extent_buffer_blocknr(root->node), 1);
428 }
429
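/*
 * take an extra reference on everything a tree block points to: the disk
 * extents behind the file extent items of a leaf, or the child blocks of
 * a node.  Roots that do not reference-count (ref_cows unset) are skipped,
 * and on failure the references already taken are dropped again.
 */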
430 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
431 struct extent_buffer *buf)
432 {
433 u64 blocknr;
434 u32 nritems;
435 struct btrfs_key key;
436 struct btrfs_file_extent_item *fi;
437 int i;
438 int leaf;
439 int ret;
440 int faili;
441 int err;
442
443 if (!root->ref_cows)
444 return 0;
445
446 leaf = btrfs_is_leaf(buf);
447 nritems = btrfs_header_nritems(buf);
448 for (i = 0; i < nritems; i++) {
449 if (leaf) {
450 u64 disk_blocknr;
451 btrfs_item_key_to_cpu(buf, &key, i);
452 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
453 continue;
454 fi = btrfs_item_ptr(buf, i,
455 struct btrfs_file_extent_item);
456 if (btrfs_file_extent_type(buf, fi) ==
457 BTRFS_FILE_EXTENT_INLINE)
458 continue;
459 disk_blocknr = btrfs_file_extent_disk_blocknr(buf, fi);
460 if (disk_blocknr == 0)
461 continue;
462 ret = btrfs_inc_extent_ref(trans, root, disk_blocknr,
463 btrfs_file_extent_disk_num_blocks(buf, fi));
464 if (ret) {
465 faili = i;
466 goto fail;
467 }
468 } else {
469 blocknr = btrfs_node_blockptr(buf, i);
470 ret = btrfs_inc_extent_ref(trans, root, blocknr, 1);
471 if (ret) {
472 faili = i;
473 goto fail;
474 }
475 }
476 }
477 return 0;
478 fail:
479 WARN_ON(1);
480 for (i = 0; i < faili; i++) {
481 if (leaf) {
482 u64 disk_blocknr;
483 btrfs_item_key_to_cpu(buf, &key, i);
484 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
485 continue;
486 fi = btrfs_item_ptr(buf, i,
487 struct btrfs_file_extent_item);
488 if (btrfs_file_extent_type(buf, fi) ==
489 BTRFS_FILE_EXTENT_INLINE)
490 continue;
491 disk_blocknr = btrfs_file_extent_disk_blocknr(buf, fi);
492 if (disk_blocknr == 0)
493 continue;
494 err = btrfs_free_extent(trans, root, disk_blocknr,
495 btrfs_file_extent_disk_num_blocks(buf,
496 fi), 0);
497 BUG_ON(err);
498 } else {
499 blocknr = btrfs_node_blockptr(buf, i);
500 err = btrfs_free_extent(trans, root, blocknr, 1, 0);
501 BUG_ON(err);
502 }
503 }
504 return ret;
505 }
506
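/*
 * copy the in-memory block group item for this group over its item in the
 * extent tree.  Pending extent inserts and deletes are flushed even if the
 * update itself fails.
 */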
507 static int write_one_cache_group(struct btrfs_trans_handle *trans,
508 struct btrfs_root *root,
509 struct btrfs_path *path,
510 struct btrfs_block_group_cache *cache)
511 {
512 int ret;
513 int pending_ret;
514 struct btrfs_root *extent_root = root->fs_info->extent_root;
515 unsigned long bi;
516 struct extent_buffer *leaf;
517
518 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
519 if (ret < 0)
520 goto fail;
521 BUG_ON(ret);
522
523 leaf = path->nodes[0];
524 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
525 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
526 btrfs_mark_buffer_dirty(leaf);
527 btrfs_release_path(extent_root, path);
528 fail:
529 finish_current_insert(trans, extent_root);
530 pending_ret = del_pending_extents(trans, extent_root);
531 if (ret)
532 return ret;
533 if (pending_ret)
534 return pending_ret;
535 if (cache->data)
536 cache->last_alloc = cache->first_free;
537 return 0;
538
539 }
540
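/*
 * write out every block group in the radix tree that is tagged dirty.
 * Groups that fail to write keep their dirty tag so a later pass can
 * retry, and the error is reported to the caller.
 */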
541 static int write_dirty_block_radix(struct btrfs_trans_handle *trans,
542 struct btrfs_root *root,
543 struct radix_tree_root *radix)
544 {
545 struct btrfs_block_group_cache *cache[8];
546 int ret;
547 int err = 0;
548 int werr = 0;
549 int i;
550 struct btrfs_path *path;
551 unsigned long off = 0;
552
553 path = btrfs_alloc_path();
554 if (!path)
555 return -ENOMEM;
556
557 while(1) {
558 ret = radix_tree_gang_lookup_tag(radix, (void **)cache,
559 off, ARRAY_SIZE(cache),
560 BTRFS_BLOCK_GROUP_DIRTY);
561 if (!ret)
562 break;
563 for (i = 0; i < ret; i++) {
564 err = write_one_cache_group(trans, root,
565 path, cache[i]);
566 /*
567 * if we fail to write the cache group, we want
568 * to keep it marked dirty in hopes that a later
569 * write will work
570 */
571 if (err) {
572 werr = err;
573 off = cache[i]->key.objectid +
574 cache[i]->key.offset;
575 continue;
576 }
577
578 radix_tree_tag_clear(radix, cache[i]->key.objectid +
579 cache[i]->key.offset - 1,
580 BTRFS_BLOCK_GROUP_DIRTY);
581 }
582 }
583 btrfs_free_path(path);
584 return werr;
585 }
586
587 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
588 struct btrfs_root *root)
589 {
590 int ret;
591 int ret2;
592 ret = write_dirty_block_radix(trans, root,
593 &root->fs_info->block_group_radix);
594 ret2 = write_dirty_block_radix(trans, root,
595 &root->fs_info->block_group_data_radix);
596 if (ret)
597 return ret;
598 if (ret2)
599 return ret2;
600 return 0;
601 }
602
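/*
 * adjust the used-block count of the block groups covering
 * [blocknr, blocknr + num) after an allocation or a free.  An allocation
 * of the other data/metadata type in a group that is less than half full
 * moves the group to the other radix tree; frees can return the blocks to
 * the free space cache and re-tag the group as available.
 */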
603 static int update_block_group(struct btrfs_trans_handle *trans,
604 struct btrfs_root *root,
605 u64 blocknr, u64 num, int alloc, int mark_free,
606 int data)
607 {
608 struct btrfs_block_group_cache *cache;
609 struct btrfs_fs_info *info = root->fs_info;
610 u64 total = num;
611 u64 old_val;
612 u64 block_in_group;
613 int ret;
614
615 while(total) {
616 cache = btrfs_lookup_block_group(info, blocknr);
617 if (!cache) {
618 return -1;
619 }
620 block_in_group = blocknr - cache->key.objectid;
621 WARN_ON(block_in_group > cache->key.offset);
622 radix_tree_tag_set(cache->radix, cache->key.objectid +
623 cache->key.offset - 1,
624 BTRFS_BLOCK_GROUP_DIRTY);
625
626 old_val = btrfs_block_group_used(&cache->item);
627 num = min(total, cache->key.offset - block_in_group);
628 if (alloc) {
629 if (blocknr > cache->last_alloc)
630 cache->last_alloc = blocknr;
631 if (cache->data != data &&
632 old_val < (cache->key.offset >> 1)) {
633 cache->data = data;
634 radix_tree_delete(cache->radix,
635 cache->key.objectid +
636 cache->key.offset - 1);
637
638 if (data) {
639 cache->radix =
640 &info->block_group_data_radix;
641 cache->item.flags |=
642 BTRFS_BLOCK_GROUP_DATA;
643 } else {
644 cache->radix = &info->block_group_radix;
645 cache->item.flags &=
646 ~BTRFS_BLOCK_GROUP_DATA;
647 }
648 ret = radix_tree_insert(cache->radix,
649 cache->key.objectid +
650 cache->key.offset - 1,
651 (void *)cache);
652 }
653 old_val += num;
654 } else {
655 old_val -= num;
656 if (blocknr < cache->first_free)
657 cache->first_free = blocknr;
658 if (mark_free) {
659 set_extent_dirty(&info->free_space_cache,
660 blocknr, blocknr + num - 1,
661 GFP_NOFS);
662 }
663 if (old_val < (cache->key.offset >> 1) &&
664 old_val + num >= (cache->key.offset >> 1)) {
665 radix_tree_tag_set(cache->radix,
666 cache->key.objectid +
667 cache->key.offset - 1,
668 BTRFS_BLOCK_GROUP_AVAIL);
669 }
670 }
671 btrfs_set_block_group_used(&cache->item, old_val);
672 total -= num;
673 blocknr += num;
674 }
675 return 0;
676 }
677
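/*
 * copy every block number currently set in the pinned radix into the
 * radix tree supplied by the caller.  Also warns if the extent_ins radix
 * still holds pending inserts at this point.
 */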
678 int btrfs_copy_pinned(struct btrfs_root *root, struct radix_tree_root *copy)
679 {
680 unsigned long gang[8];
681 u64 last = 0;
682 struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
683 int ret;
684 int i;
685
686 while(1) {
687 ret = find_first_radix_bit(pinned_radix, gang, last,
688 ARRAY_SIZE(gang));
689 if (!ret)
690 break;
691 for (i = 0; i < ret; i++) {
692 set_radix_bit(copy, gang[i]);
693 last = gang[i] + 1;
694 }
695 }
696 ret = find_first_radix_bit(&root->fs_info->extent_ins_radix, gang, 0,
697 ARRAY_SIZE(gang));
698 WARN_ON(ret);
699 return 0;
700 }
701
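/*
 * unpin the blocks recorded in unpin_radix: clear them from the pinned
 * radix, drop the pinned count on their block groups, pull last_alloc
 * back if needed and return metadata blocks to the free space cache.
 */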
702 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
703 struct btrfs_root *root,
704 struct radix_tree_root *unpin_radix)
705 {
706 unsigned long gang[8];
707 struct btrfs_block_group_cache *block_group;
708 u64 first = 0;
709 int ret;
710 int i;
711 struct radix_tree_root *pinned_radix = &root->fs_info->pinned_radix;
712 struct extent_map_tree *free_space_cache;
713
714 free_space_cache = &root->fs_info->free_space_cache;
715
716 while(1) {
717 ret = find_first_radix_bit(unpin_radix, gang, 0,
718 ARRAY_SIZE(gang));
719 if (!ret)
720 break;
721 if (!first)
722 first = gang[0];
723 for (i = 0; i < ret; i++) {
724 clear_radix_bit(pinned_radix, gang[i]);
725 clear_radix_bit(unpin_radix, gang[i]);
726 block_group = btrfs_lookup_block_group(root->fs_info,
727 gang[i]);
728 if (block_group) {
729 WARN_ON(block_group->pinned == 0);
730 block_group->pinned--;
731 if (gang[i] < block_group->last_alloc)
732 block_group->last_alloc = gang[i];
733 if (!block_group->data) {
734 set_extent_dirty(free_space_cache,
735 gang[i], gang[i],
736 GFP_NOFS);
737 }
738 }
739 }
740 }
741 return 0;
742 }
743
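/*
 * insert an extent item (one block, refs == 1, owned by the extent root)
 * for every block recorded in the extent_ins radix.  btrfs_alloc_extent
 * queues blocks here when the extent root allocates for itself, deferring
 * the item insertion.
 */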
744 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
745 btrfs_root *extent_root)
746 {
747 struct btrfs_key ins;
748 struct btrfs_extent_item extent_item;
749 int i;
750 int ret;
751 int err;
752 unsigned long gang[8];
753 struct btrfs_fs_info *info = extent_root->fs_info;
754
755 btrfs_set_stack_extent_refs(&extent_item, 1);
756 ins.offset = 1;
757 btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
758 btrfs_set_stack_extent_owner(&extent_item,
759 extent_root->root_key.objectid);
760
761 while(1) {
762 ret = find_first_radix_bit(&info->extent_ins_radix, gang, 0,
763 ARRAY_SIZE(gang));
764 if (!ret)
765 break;
766
767 for (i = 0; i < ret; i++) {
768 ins.objectid = gang[i];
769 err = btrfs_insert_item(trans, extent_root, &ins,
770 &extent_item,
771 sizeof(extent_item));
772 clear_radix_bit(&info->extent_ins_radix, gang[i]);
773 WARN_ON(err);
774 }
775 }
776 return 0;
777 }
778
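/*
 * mark a single block as pinned (or as a pending delete when 'pending' is
 * set) so it is not reused before the transaction commits.  Blocks that
 * were written during the running transaction do not need pinning and are
 * skipped; newly pinned blocks bump their block group's pinned count.
 */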
779 static int pin_down_block(struct btrfs_root *root, u64 blocknr, int pending)
780 {
781 int err;
782 struct extent_buffer *buf;
783
784 if (!pending) {
785 buf = btrfs_find_tree_block(root, blocknr);
786 if (buf) {
787 if (btrfs_buffer_uptodate(buf)) {
788 u64 transid =
789 root->fs_info->running_transaction->transid;
790 if (btrfs_header_generation(buf) == transid) {
791 free_extent_buffer(buf);
792 return 0;
793 }
794 }
795 free_extent_buffer(buf);
796 }
797 err = set_radix_bit(&root->fs_info->pinned_radix, blocknr);
798 if (!err) {
799 struct btrfs_block_group_cache *cache;
800 cache = btrfs_lookup_block_group(root->fs_info,
801 blocknr);
802 if (cache)
803 cache->pinned++;
804 }
805 } else {
806 err = set_radix_bit(&root->fs_info->pending_del_radix, blocknr);
807 }
808 BUG_ON(err < 0);
809 return 0;
810 }
811
812 /*
813 * remove an extent from the root, returns 0 on success
814 */
815 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
816 *root, u64 blocknr, u64 num_blocks, int pin,
817 int mark_free)
818 {
819 struct btrfs_path *path;
820 struct btrfs_key key;
821 struct btrfs_fs_info *info = root->fs_info;
822 struct btrfs_root *extent_root = info->extent_root;
823 struct extent_buffer *leaf;
824 int ret;
825 struct btrfs_extent_item *ei;
826 u32 refs;
827
828 key.objectid = blocknr;
829 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
830 key.offset = num_blocks;
831
832 path = btrfs_alloc_path();
833 if (!path)
834 return -ENOMEM;
835
836 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
837 if (ret < 0) {
838 btrfs_free_path(path);
839 return ret;
840 }
BUG_ON(ret);

841 leaf = path->nodes[0];
842 ei = btrfs_item_ptr(leaf, path->slots[0],
843 struct btrfs_extent_item);
844 refs = btrfs_extent_refs(leaf, ei);
845 BUG_ON(refs == 0);
846 refs -= 1;
847 btrfs_set_extent_refs(leaf, ei, refs);
848 btrfs_mark_buffer_dirty(leaf);
849
850 if (refs == 0) {
851 u64 super_blocks_used, root_blocks_used;
852
853 if (pin) {
854 ret = pin_down_block(root, blocknr, 0);
855 BUG_ON(ret);
856 }
857
858 /* block accounting for super block */
859 super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
860 btrfs_set_super_blocks_used(&info->super_copy,
861 super_blocks_used - num_blocks);
862
863 /* block accounting for root item */
864 root_blocks_used = btrfs_root_used(&root->root_item);
865 btrfs_set_root_used(&root->root_item,
866 root_blocks_used - num_blocks);
867
868 ret = btrfs_del_item(trans, extent_root, path);
869 if (ret) {
870 btrfs_free_path(path);
871 return ret;
}
872 ret = update_block_group(trans, root, blocknr, num_blocks, 0,
873 mark_free, 0);
874 BUG_ON(ret);
875 }
876 btrfs_free_path(path);
877 finish_current_insert(trans, extent_root);
878 return ret;
879 }
880
881 /*
882 * find all the blocks marked as pending in the radix tree and remove
883 * them from the extent tree
884 */
885 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
886 btrfs_root *extent_root)
887 {
888 int ret;
889 int wret;
890 int err = 0;
891 unsigned long gang[4];
892 int i;
893 struct radix_tree_root *pending_radix;
894 struct radix_tree_root *pinned_radix;
895 struct btrfs_block_group_cache *cache;
896
897 pending_radix = &extent_root->fs_info->pending_del_radix;
898 pinned_radix = &extent_root->fs_info->pinned_radix;
899
900 while(1) {
901 ret = find_first_radix_bit(pending_radix, gang, 0,
902 ARRAY_SIZE(gang));
903 if (!ret)
904 break;
905 for (i = 0; i < ret; i++) {
906 wret = set_radix_bit(pinned_radix, gang[i]);
907 if (wret == 0) {
908 cache =
909 btrfs_lookup_block_group(extent_root->fs_info,
910 gang[i]);
911 if (cache)
912 cache->pinned++;
913 }
914 if (wret < 0) {
915 printk(KERN_CRIT "set_radix_bit, err %d\n",
916 wret);
917 BUG_ON(wret < 0);
918 }
919 wret = clear_radix_bit(pending_radix, gang[i]);
920 BUG_ON(wret);
921 wret = __free_extent(trans, extent_root,
922 gang[i], 1, 0, 0);
923 if (wret)
924 err = wret;
925 }
926 }
927 return err;
928 }
929
930 /*
931 * remove an extent from the root, returns 0 on success
932 */
933 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
934 *root, u64 blocknr, u64 num_blocks, int pin)
935 {
936 struct btrfs_root *extent_root = root->fs_info->extent_root;
937 int pending_ret;
938 int ret;
939
940 if (root == extent_root) {
941 pin_down_block(root, blocknr, 1);
942 return 0;
943 }
944 ret = __free_extent(trans, root, blocknr, num_blocks, pin, pin == 0);
945 pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
946 return ret ? ret : pending_ret;
947 }
948
949 /*
950 * walks the btree of allocated extents and finds a hole of a given size.
951 * The key ins is changed to record the hole:
952 * ins->objectid == block start
953 * ins->flags == BTRFS_EXTENT_ITEM_KEY
954 * ins->offset == number of blocks
955 * Any available blocks before search_start are skipped.
956 */
957 static int find_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
958 *orig_root, u64 num_blocks, u64 empty_size,
959 u64 search_start, u64 search_end, u64 hint_block,
960 struct btrfs_key *ins, u64 exclude_start,
961 u64 exclude_nr, int data)
962 {
963 struct btrfs_path *path;
964 struct btrfs_key key;
965 int ret;
966 u64 hole_size = 0;
967 int slot = 0;
968 u64 last_block = 0;
969 u64 test_block;
970 u64 orig_search_start = search_start;
971 int start_found;
972 struct extent_buffer *l;
973 struct btrfs_root *root = orig_root->fs_info->extent_root;
974 struct btrfs_fs_info *info = root->fs_info;
975 int total_needed = num_blocks;
976 int level;
977 struct btrfs_block_group_cache *block_group;
978 int full_scan = 0;
979 int wrapped = 0;
980 u64 cached_search_start = 0;
981
982 WARN_ON(num_blocks < 1);
983 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
984
985 level = btrfs_header_level(root->node);
986
987 if (search_end == (u64)-1)
988 search_end = btrfs_super_total_blocks(&info->super_copy);
989 if (hint_block) {
990 block_group = btrfs_lookup_block_group(info, hint_block);
991 block_group = btrfs_find_block_group(root, block_group,
992 hint_block, data, 1);
993 } else {
994 block_group = btrfs_find_block_group(root,
995 trans->block_group, 0,
996 data, 1);
997 }
998
999 total_needed += empty_size;
1000 path = btrfs_alloc_path();
1001
1002 check_failed:
1003 search_start = find_search_start(root, &block_group,
1004 search_start, total_needed, data);
1005 cached_search_start = search_start;
1006
1007 btrfs_init_path(path);
1008 ins->objectid = search_start;
1009 ins->offset = 0;
1010 start_found = 0;
1011 path->reada = 2;
1012
1013 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
1014 if (ret < 0)
1015 goto error;
1016
1017 if (path->slots[0] > 0) {
1018 path->slots[0]--;
1019 }
1020
1021 l = path->nodes[0];
1022 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1023
1024 /*
1025 * a rare case, go back one key if we hit a block group item
1026 * instead of an extent item
1027 */
1028 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY &&
1029 key.objectid + key.offset >= search_start) {
1030 ins->objectid = key.objectid;
1031 ins->offset = key.offset - 1;
1032 btrfs_release_path(root, path);
1033 ret = btrfs_search_slot(trans, root, ins, path, 0, 0);
1034 if (ret < 0)
1035 goto error;
1036
1037 if (path->slots[0] > 0) {
1038 path->slots[0]--;
1039 }
1040 }
1041
1042 while (1) {
1043 l = path->nodes[0];
1044 slot = path->slots[0];
1045 if (slot >= btrfs_header_nritems(l)) {
1046 ret = btrfs_next_leaf(root, path);
1047 if (ret == 0)
1048 continue;
1049 if (ret < 0)
1050 goto error;
1051 if (!start_found) {
1052 ins->objectid = search_start;
1053 ins->offset = search_end - search_start;
1054 start_found = 1;
1055 goto check_pending;
1056 }
1057 ins->objectid = last_block > search_start ?
1058 last_block : search_start;
1059 ins->offset = search_end - ins->objectid;
1060 goto check_pending;
1061 }
1062
1063 btrfs_item_key_to_cpu(l, &key, slot);
1064 if (key.objectid >= search_start && key.objectid > last_block &&
1065 start_found) {
1066 if (last_block < search_start)
1067 last_block = search_start;
1068 hole_size = key.objectid - last_block;
1069 if (hole_size >= num_blocks) {
1070 ins->objectid = last_block;
1071 ins->offset = hole_size;
1072 goto check_pending;
1073 }
1074 }
1075
1076 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
1077 goto next;
1078
1079 start_found = 1;
1080 last_block = key.objectid + key.offset;
1081
1082 if (!full_scan && last_block >= block_group->key.objectid +
1083 block_group->key.offset) {
1084 btrfs_release_path(root, path);
1085 search_start = block_group->key.objectid +
1086 block_group->key.offset * 2;
1087 goto new_group;
1088 }
1089 next:
1090 path->slots[0]++;
1091 cond_resched();
1092 }
1093 check_pending:
1094 /* we have to make sure we didn't find an extent that has already
1095 * been allocated by the map tree or the original allocation
1096 */
1097 btrfs_release_path(root, path);
1098 BUG_ON(ins->objectid < search_start);
1099
1100 if (ins->objectid + num_blocks >= search_end)
1101 goto enospc;
1102
1103 for (test_block = ins->objectid;
1104 test_block < ins->objectid + num_blocks; test_block++) {
1105 if (test_radix_bit(&info->pinned_radix, test_block) ||
1106 test_radix_bit(&info->extent_ins_radix, test_block)) {
1107 search_start = test_block + 1;
1108 goto new_group;
1109 }
1110 }
1111 if (exclude_nr > 0 && (ins->objectid + num_blocks > exclude_start &&
1112 ins->objectid < exclude_start + exclude_nr)) {
1113 search_start = exclude_start + exclude_nr;
1114 goto new_group;
1115 }
1116 if (!data) {
1117 block_group = btrfs_lookup_block_group(info, ins->objectid);
1118 if (block_group)
1119 trans->block_group = block_group;
1120 }
1121 ins->offset = num_blocks;
1122 btrfs_free_path(path);
1123 if (0 && ins->objectid != cached_search_start) {
1124 printk("\tcached was %Lu found %Lu\n", cached_search_start, ins->objectid);
1125 }
1126 return 0;
1127
1128 new_group:
1129 if (search_start + num_blocks >= search_end) {
1130 enospc:
1131 search_start = orig_search_start;
1132 if (full_scan) {
1133 ret = -ENOSPC;
1134 goto error;
1135 }
1136 if (wrapped) {
1137 if (!full_scan)
1138 total_needed -= empty_size;
1139 full_scan = 1;
1140 } else
1141 wrapped = 1;
1142 }
1143 block_group = btrfs_lookup_block_group(info, search_start);
1144 cond_resched();
1145 if (!full_scan)
1146 block_group = btrfs_find_block_group(root, block_group,
1147 search_start, data, 0);
1148 goto check_failed;
1149
1150 error:
1151 btrfs_release_path(root, path);
1152 btrfs_free_path(path);
1153 return ret;
1154 }
1155 /*
1156 * finds a free extent and does all the dirty work required for allocation
1157 * returns the key for the allocated extent through ins.
1159 *
1160 * returns 0 if everything worked, non-zero otherwise.
1161 */
1162 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
1163 struct btrfs_root *root, u64 owner,
1164 u64 num_blocks, u64 empty_size, u64 hint_block,
1165 u64 search_end, struct btrfs_key *ins, int data)
1166 {
1167 int ret;
1168 int pending_ret;
1169 u64 super_blocks_used, root_blocks_used;
1170 u64 search_start = 0;
1171 struct btrfs_fs_info *info = root->fs_info;
1172 struct btrfs_root *extent_root = info->extent_root;
1173 struct btrfs_extent_item extent_item;
1174
1175 btrfs_set_stack_extent_refs(&extent_item, 1);
1176 btrfs_set_stack_extent_owner(&extent_item, owner);
1177
1178 WARN_ON(num_blocks < 1);
1179 ret = find_free_extent(trans, root, num_blocks, empty_size,
1180 search_start, search_end, hint_block, ins,
1181 trans->alloc_exclude_start,
1182 trans->alloc_exclude_nr, data);
1183 BUG_ON(ret);
1184 if (ret)
1185 return ret;
1186
1187 /* block accounting for super block */
1188 super_blocks_used = btrfs_super_blocks_used(&info->super_copy);
1189 btrfs_set_super_blocks_used(&info->super_copy, super_blocks_used +
1190 num_blocks);
1191
1192 /* block accounting for root item */
1193 root_blocks_used = btrfs_root_used(&root->root_item);
1194 btrfs_set_root_used(&root->root_item, root_blocks_used +
1195 num_blocks);
1196
1197 clear_extent_dirty(&root->fs_info->free_space_cache,
1198 ins->objectid, ins->objectid + ins->offset - 1,
1199 GFP_NOFS);
1200
1201 if (root == extent_root) {
1202 BUG_ON(num_blocks != 1);
1203 set_radix_bit(&root->fs_info->extent_ins_radix, ins->objectid);
1204 goto update_block;
1205 }
1206
1207 WARN_ON(trans->alloc_exclude_nr);
1208 trans->alloc_exclude_start = ins->objectid;
1209 trans->alloc_exclude_nr = ins->offset;
1210 ret = btrfs_insert_item(trans, extent_root, ins, &extent_item,
1211 sizeof(extent_item));
1212
1213 trans->alloc_exclude_start = 0;
1214 trans->alloc_exclude_nr = 0;
1215
1216 BUG_ON(ret);
1217 finish_current_insert(trans, extent_root);
1218 pending_ret = del_pending_extents(trans, extent_root);
1219
1220 if (ret) {
1221 return ret;
1222 }
1223 if (pending_ret) {
1224 return pending_ret;
1225 }
1226
1227 update_block:
1228 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0,
1229 data);
1230 BUG_ON(ret);
1231 return 0;
1232 }
1233
1234 /*
1235 * helper function to allocate a block for a given tree
1236 * returns the tree buffer or NULL.
1237 */
1238 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
1239 struct btrfs_root *root, u64 hint,
1240 u64 empty_size)
1241 {
1242 struct btrfs_key ins;
1243 int ret;
1244 struct extent_buffer *buf;
1245
1246 ret = btrfs_alloc_extent(trans, root, root->root_key.objectid,
1247 1, empty_size, hint, (u64)-1, &ins, 0);
1248 if (ret) {
1249 BUG_ON(ret > 0);
1250 return ERR_PTR(ret);
1251 }
1252 buf = btrfs_find_create_tree_block(root, ins.objectid);
1253 if (!buf) {
1254 btrfs_free_extent(trans, root, ins.objectid, 1, 0);
1255 return ERR_PTR(-ENOMEM);
1256 }
1257 btrfs_set_buffer_uptodate(buf);
1258 buf->alloc_addr = (unsigned long)__builtin_return_address(0);
1259 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
1260 buf->start + buf->len - 1, GFP_NOFS);
1261 /*
1262 set_buffer_checked(buf);
1263 set_buffer_defrag(buf);
1264 */
1265 /* FIXME!!!!!!!!!!!!!!!!
1266 set_radix_bit(&trans->transaction->dirty_pages, buf->pages[0]->index);
1267 */
1268 trans->blocks_used++;
1269 return buf;
1270 }
1271
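/*
 * drop the reference on every disk extent referenced by the file extent
 * items in a leaf.  Inline extents and holes (disk_blocknr == 0) are
 * skipped.
 */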
1272 static int drop_leaf_ref(struct btrfs_trans_handle *trans,
1273 struct btrfs_root *root, struct extent_buffer *leaf)
1274 {
1275 struct btrfs_key key;
1276 struct btrfs_file_extent_item *fi;
1277 int i;
1278 int nritems;
1279 int ret;
1280
1281 BUG_ON(!btrfs_is_leaf(leaf));
1282 nritems = btrfs_header_nritems(leaf);
1283 for (i = 0; i < nritems; i++) {
1284 u64 disk_blocknr;
1285
1286 btrfs_item_key_to_cpu(leaf, &key, i);
1287 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1288 continue;
1289 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1290 if (btrfs_file_extent_type(leaf, fi) ==
1291 BTRFS_FILE_EXTENT_INLINE)
1292 continue;
1293 /*
1294 * FIXME make sure to insert a trans record that
1295 * repeats the snapshot del on crash
1296 */
1297 disk_blocknr = btrfs_file_extent_disk_blocknr(leaf, fi);
1298 if (disk_blocknr == 0)
1299 continue;
1300 ret = btrfs_free_extent(trans, root, disk_blocknr,
1301 btrfs_file_extent_disk_num_blocks(leaf, fi), 0);
1302 BUG_ON(ret);
1303 }
1304 return 0;
1305 }
1306
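/*
 * issue readahead for the children of a node that walk_down_tree is about
 * to free.  Only blocks with a single reference are read, and fs_mutex is
 * dropped around the readahead calls.
 */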
1307 static void reada_walk_down(struct btrfs_root *root,
1308 struct extent_buffer *node)
1309 {
1310 int i;
1311 u32 nritems;
1312 u64 blocknr;
1313 int ret;
1314 u32 refs;
1315
1316 nritems = btrfs_header_nritems(node);
1317 for (i = 0; i < nritems; i++) {
1318 blocknr = btrfs_node_blockptr(node, i);
1319 ret = lookup_extent_ref(NULL, root, blocknr, 1, &refs);
1320 BUG_ON(ret);
1321 if (refs != 1)
1322 continue;
1323 mutex_unlock(&root->fs_info->fs_mutex);
1324 ret = readahead_tree_block(root, blocknr);
1325 cond_resched();
1326 mutex_lock(&root->fs_info->fs_mutex);
1327 if (ret)
1328 break;
1329 }
1330 }
1331
1332 /*
1333 * helper function for drop_snapshot, this walks down the tree dropping ref
1334 * counts as it goes.
1335 */
1336 static int walk_down_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1337 *root, struct btrfs_path *path, int *level)
1338 {
1339 struct extent_buffer *next;
1340 struct extent_buffer *cur;
1341 u64 blocknr;
1342 int ret;
1343 u32 refs;
1344
1345 WARN_ON(*level < 0);
1346 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1347 ret = lookup_extent_ref(trans, root,
1348 extent_buffer_blocknr(path->nodes[*level]),
1349 1, &refs);
1350 BUG_ON(ret);
1351 if (refs > 1)
1352 goto out;
1353
1354 /*
1355 * walk down to the last node level and free all the leaves
1356 */
1357 while(*level >= 0) {
1358 WARN_ON(*level < 0);
1359 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1360 cur = path->nodes[*level];
1361
1362 if (*level > 0 && path->slots[*level] == 0)
1363 reada_walk_down(root, cur);
1364
1365 if (btrfs_header_level(cur) != *level)
1366 WARN_ON(1);
1367
1368 if (path->slots[*level] >=
1369 btrfs_header_nritems(cur))
1370 break;
1371 if (*level == 0) {
1372 ret = drop_leaf_ref(trans, root, cur);
1373 BUG_ON(ret);
1374 break;
1375 }
1376 blocknr = btrfs_node_blockptr(cur, path->slots[*level]);
1377 ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
1378 BUG_ON(ret);
1379 if (refs != 1) {
1380 path->slots[*level]++;
1381 ret = btrfs_free_extent(trans, root, blocknr, 1, 1);
1382 BUG_ON(ret);
1383 continue;
1384 }
1385 next = btrfs_find_tree_block(root, blocknr);
1386 if (!next || !btrfs_buffer_uptodate(next)) {
1387 free_extent_buffer(next);
1388 mutex_unlock(&root->fs_info->fs_mutex);
1389 next = read_tree_block(root, blocknr);
1390 mutex_lock(&root->fs_info->fs_mutex);
1391
1392 /* we dropped the lock, check one more time */
1393 ret = lookup_extent_ref(trans, root, blocknr, 1, &refs);
1394 BUG_ON(ret);
1395 if (refs != 1) {
1396 path->slots[*level]++;
1397 free_extent_buffer(next);
1398 ret = btrfs_free_extent(trans, root,
1399 blocknr, 1, 1);
1400 BUG_ON(ret);
1401 continue;
1402 }
1403 }
1404 WARN_ON(*level <= 0);
1405 if (path->nodes[*level-1])
1406 free_extent_buffer(path->nodes[*level-1]);
1407 path->nodes[*level-1] = next;
1408 *level = btrfs_header_level(next);
1409 path->slots[*level] = 0;
1410 }
1411 out:
1412 WARN_ON(*level < 0);
1413 WARN_ON(*level >= BTRFS_MAX_LEVEL);
1414 ret = btrfs_free_extent(trans, root,
1415 extent_buffer_blocknr(path->nodes[*level]), 1, 1);
1416 free_extent_buffer(path->nodes[*level]);
1417 path->nodes[*level] = NULL;
1418 *level += 1;
1419 BUG_ON(ret);
1420 return 0;
1421 }
1422
1423 /*
1424 * helper for dropping snapshots. This walks back up the tree in the path
1425 * to find the first node higher up where we haven't yet gone through
1426 * all the slots
1427 */
1428 static int walk_up_tree(struct btrfs_trans_handle *trans, struct btrfs_root
1429 *root, struct btrfs_path *path, int *level)
1430 {
1431 int i;
1432 int slot;
1433 int ret;
1434 struct btrfs_root_item *root_item = &root->root_item;
1435
1436 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
1437 slot = path->slots[i];
1438 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
1439 struct extent_buffer *node;
1440 struct btrfs_disk_key disk_key;
1441 node = path->nodes[i];
1442 path->slots[i]++;
1443 *level = i;
1444 WARN_ON(*level == 0);
1445 btrfs_node_key(node, &disk_key, path->slots[i]);
1446 memcpy(&root_item->drop_progress,
1447 &disk_key, sizeof(disk_key));
1448 root_item->drop_level = i;
1449 return 0;
1450 } else {
1451 ret = btrfs_free_extent(trans, root,
1452 extent_buffer_blocknr(path->nodes[*level]),
1453 1, 1);
1454 BUG_ON(ret);
1455 free_extent_buffer(path->nodes[*level]);
1456 path->nodes[*level] = NULL;
1457 *level = i + 1;
1458 }
1459 }
1460 return 1;
1461 }
1462
1463 /*
1464 * drop the reference count on the tree rooted at 'root'. This traverses
1465 * the tree freeing any blocks that have a ref count of zero after being
1466 * decremented.
1467 */
1468 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
1469 *root)
1470 {
1471 int ret = 0;
1472 int wret;
1473 int level;
1474 struct btrfs_path *path;
1475 int i;
1476 int orig_level;
1477 struct btrfs_root_item *root_item = &root->root_item;
1478
1479 path = btrfs_alloc_path();
1480 BUG_ON(!path);
1481
1482 level = btrfs_header_level(root->node);
1483 orig_level = level;
1484 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
1485 path->nodes[level] = root->node;
1486 extent_buffer_get(root->node);
1487 path->slots[level] = 0;
1488 } else {
1489 struct btrfs_key key;
1490 struct btrfs_disk_key found_key;
1491 struct extent_buffer *node;
1492
1493 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
1494 level = root_item->drop_level;
1495 path->lowest_level = level;
1496 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1497 if (wret < 0) {
1498 ret = wret;
1499 goto out;
1500 }
1501 node = path->nodes[level];
1502 btrfs_node_key(node, &found_key, path->slots[level]);
1503 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
1504 sizeof(found_key)));
1505 }
1506 while(1) {
1507 wret = walk_down_tree(trans, root, path, &level);
1508 if (wret > 0)
1509 break;
1510 if (wret < 0)
1511 ret = wret;
1512
1513 wret = walk_up_tree(trans, root, path, &level);
1514 if (wret > 0)
1515 break;
1516 if (wret < 0)
1517 ret = wret;
1518 ret = -EAGAIN;
1519 break;
1520 }
1521 for (i = 0; i <= orig_level; i++) {
1522 if (path->nodes[i]) {
1523 free_extent_buffer(path->nodes[i]);
1524 path->nodes[i] = NULL;
1525 }
1526 }
1527 out:
1528 btrfs_free_path(path);
1529 return ret;
1530 }
1531
1532 static int free_block_group_radix(struct radix_tree_root *radix)
1533 {
1534 int ret;
1535 struct btrfs_block_group_cache *cache[8];
1536 int i;
1537
1538 while(1) {
1539 ret = radix_tree_gang_lookup(radix, (void **)cache, 0,
1540 ARRAY_SIZE(cache));
1541 if (!ret)
1542 break;
1543 for (i = 0; i < ret; i++) {
1544 radix_tree_delete(radix, cache[i]->key.objectid +
1545 cache[i]->key.offset - 1);
1546 kfree(cache[i]);
1547 }
1548 }
1549 return 0;
1550 }
1551
1552 int btrfs_free_block_groups(struct btrfs_fs_info *info)
1553 {
1554 int ret;
1555 int ret2;
1556 u64 start;
1557 u64 end;
1558
1559 ret = free_block_group_radix(&info->block_group_radix);
1560 ret2 = free_block_group_radix(&info->block_group_data_radix);
1561 if (ret)
1562 return ret;
1563 if (ret2)
1564 return ret2;
1565
1566 while(1) {
1567 ret = find_first_extent_bit(&info->free_space_cache, 0,
1568 &start, &end, EXTENT_DIRTY);
1569 if (ret)
1570 break;
1571 clear_extent_dirty(&info->free_space_cache, start,
1572 end, GFP_NOFS);
1573 }
1574 return 0;
1575 }
1576
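/*
 * read every block group item out of the extent tree and build the
 * in-memory block group caches, inserting each one into the data or
 * metadata radix tree and tagging groups with free room as available
 * for allocation.
 */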
1577 int btrfs_read_block_groups(struct btrfs_root *root)
1578 {
1579 struct btrfs_path *path;
1580 int ret;
1581 int err = 0;
1582 struct btrfs_block_group_cache *cache;
1583 struct btrfs_fs_info *info = root->fs_info;
1584 struct radix_tree_root *radix;
1585 struct btrfs_key key;
1586 struct btrfs_key found_key;
1587 struct extent_buffer *leaf;
1588 u64 group_size_blocks;
1589 u64 used;
1590
1591 group_size_blocks = BTRFS_BLOCK_GROUP_SIZE >>
1592 root->fs_info->sb->s_blocksize_bits;
1593 root = info->extent_root;
1594 key.objectid = 0;
1595 key.offset = group_size_blocks;
1596 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
1597
1598 path = btrfs_alloc_path();
1599 if (!path)
1600 return -ENOMEM;
1601
1602 while(1) {
1603 ret = btrfs_search_slot(NULL, info->extent_root,
1604 &key, path, 0, 0);
1605 if (ret != 0) {
1606 err = ret;
1607 break;
1608 }
1609 leaf = path->nodes[0];
1610 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1611 cache = kmalloc(sizeof(*cache), GFP_NOFS);
1612 if (!cache) {
1613 err = -ENOMEM;
1614 break;
1615 }
1616
1617 read_extent_buffer(leaf, &cache->item,
1618 btrfs_item_ptr_offset(leaf, path->slots[0]),
1619 sizeof(cache->item));
1620 if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
1621 radix = &info->block_group_data_radix;
1622 cache->data = 1;
1623 } else {
1624 radix = &info->block_group_radix;
1625 cache->data = 0;
1626 }
1627
1628 memcpy(&cache->key, &found_key, sizeof(found_key));
1629 cache->last_alloc = cache->key.objectid;
1630 cache->first_free = cache->key.objectid;
1631 cache->pinned = 0;
1632 cache->cached = 0;
1633
1634 cache->radix = radix;
1635
1636 key.objectid = found_key.objectid + found_key.offset;
1637 btrfs_release_path(root, path);
1638
1639 ret = radix_tree_insert(radix, found_key.objectid +
1640 found_key.offset - 1,
1641 (void *)cache);
1642 BUG_ON(ret);
1643 used = btrfs_block_group_used(&cache->item);
1644 if (used < div_factor(key.offset, 8)) {
1645 radix_tree_tag_set(radix, found_key.objectid +
1646 found_key.offset - 1,
1647 BTRFS_BLOCK_GROUP_AVAIL);
1648 }
1649 if (key.objectid >=
1650 btrfs_super_total_blocks(&info->super_copy))
1651 break;
1652 }
1653
1654 btrfs_free_path(path);
1655 return 0;
1656 }