if (ret)
return NULL;
- block_group = (struct btrfs_block_group_cache *)ptr;
+ block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
if (block_group->key.objectid <= bytenr && bytenr <=
{
int ret;
struct btrfs_block_group_cache *cache = *cache_ret;
- u64 last = max(search_start, cache->key.objectid);
+ u64 last;
u64 start = 0;
u64 end = 0;
+ u64 cache_miss = 0;
+ int wrapped = 0;
again:
ret = cache_block_group(root, cache);
if (ret)
goto out;
+
+ last = max(search_start, cache->key.objectid);
+
while(1) {
ret = find_first_extent_bit(&root->fs_info->free_space_cache,
last, &start, &end, EXTENT_DIRTY);
- if (ret)
- goto out;
+ if (ret) {
+ if (!cache_miss)
+ cache_miss = last;
+ goto new_group;
+ }
start = max(last, start);
last = end + 1;
- if (end + 1 - start < num)
+ if (last - start < num) {
+ if (last == cache->key.objectid + cache->key.offset)
+ cache_miss = start;
continue;
- if (start + num >= cache->key.objectid + cache->key.offset)
+ }
+ if (data != BTRFS_BLOCK_GROUP_MIXED &&
+ start + num >= cache->key.objectid + cache->key.offset)
goto new_group;
return start;
}
return search_start;
new_group:
- cache = btrfs_lookup_block_group(root->fs_info,
- last + cache->key.offset - 1);
+ last = cache->key.objectid + cache->key.offset;
+wrapped:
+ cache = btrfs_lookup_block_group(root->fs_info, last);
if (!cache) {
+ if (!wrapped) {
+ wrapped = 1;
+ last = search_start;
+ data = BTRFS_BLOCK_GROUP_MIXED;
+ goto wrapped;
+ }
return search_start;
}
- cache = btrfs_find_block_group(root, cache,
- last + cache->key.offset - 1, data, 0);
+ if (cache_miss && !cache->cached) {
+ cache_block_group(root, cache);
+ last = cache_miss;
+
+ cache = btrfs_lookup_block_group(root->fs_info, last);
+ }
+ cache = btrfs_find_block_group(root, cache, last, data, 0);
*cache_ret = cache;
- last = min(cache->key.objectid, last);
+ cache_miss = 0;
goto again;
}
static u64 div_factor(u64 num, int factor)
{
+ if (factor == 10)
+ return num;
num *= factor;
do_div(num, 10);
return num;
block_group_cache = &info->block_group_cache;
if (!owner)
- factor = 5;
+ factor = 8;
- if (data)
+ if (data == BTRFS_BLOCK_GROUP_MIXED) {
+ bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
+ factor = 10;
+ } else if (data)
bit = BLOCK_GROUP_DATA;
else
bit = BLOCK_GROUP_METADATA;
if (search_start) {
struct btrfs_block_group_cache *shint;
shint = btrfs_lookup_block_group(info, search_start);
- if (shint && shint->data == data) {
+ if (shint && (shint->data == data ||
+ shint->data == BTRFS_BLOCK_GROUP_MIXED)) {
used = btrfs_block_group_used(&shint->item);
- if (used < div_factor(shint->key.offset, factor)) {
+ if (used + shint->pinned <
+ div_factor(shint->key.offset, factor)) {
return shint;
}
}
}
- if (hint && hint->data == data) {
+ if (hint && (hint->data == data ||
+ hint->data == BTRFS_BLOCK_GROUP_MIXED)) {
used = btrfs_block_group_used(&hint->item);
- if (used < div_factor(hint->key.offset, factor)) {
+ if (used + hint->pinned <
+ div_factor(hint->key.offset, factor)) {
return hint;
}
- last = hint->key.offset * 3;
- if (hint->key.objectid >= last)
- last = max(search_start + hint->key.offset - 1,
- hint->key.objectid - last);
- else
- last = hint->key.objectid + hint->key.offset;
+ last = hint->key.objectid + hint->key.offset;
hint_last = last;
} else {
if (hint)
if (ret)
break;
- cache = (struct btrfs_block_group_cache *)ptr;
+ cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item);
free_check = cache->key.offset;
else
free_check = div_factor(cache->key.offset, factor);
-
- if (used < free_check) {
+ if (used + cache->pinned < free_check) {
found_group = cache;
goto found;
}
if (ret)
break;
- cache = (struct btrfs_block_group_cache *)ptr;
+ cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
err = write_one_cache_group(trans, root,
path, cache);
/*
old_val < (cache->key.offset >> 1)) {
int bit_to_clear;
int bit_to_set;
-
cache->data = data;
if (data) {
- bit_to_clear = BLOCK_GROUP_DATA;
- bit_to_set = BLOCK_GROUP_METADATA;
+ bit_to_clear = BLOCK_GROUP_METADATA;
+ bit_to_set = BLOCK_GROUP_DATA;
+ cache->item.flags &=
+ ~BTRFS_BLOCK_GROUP_MIXED;
cache->item.flags |=
BTRFS_BLOCK_GROUP_DATA;
} else {
- bit_to_clear = BLOCK_GROUP_METADATA;
- bit_to_set = BLOCK_GROUP_DATA;
+ bit_to_clear = BLOCK_GROUP_DATA;
+ bit_to_set = BLOCK_GROUP_METADATA;
+ cache->item.flags &=
+ ~BTRFS_BLOCK_GROUP_MIXED;
cache->item.flags &=
~BTRFS_BLOCK_GROUP_DATA;
}
set_extent_bits(&info->block_group_cache,
start, end, bit_to_set,
GFP_NOFS);
+ } else if (cache->data != data &&
+ cache->data != BTRFS_BLOCK_GROUP_MIXED) {
+ cache->data = BTRFS_BLOCK_GROUP_MIXED;
+ set_extent_bits(&info->block_group_cache,
+ start, end,
+ BLOCK_GROUP_DATA |
+ BLOCK_GROUP_METADATA,
+ GFP_NOFS);
}
old_val += num_bytes;
} else {
}
return 0;
}
+/*
+ * update_pinned_extents - mark or unmark the byte range [bytenr, bytenr + num)
+ * as pinned, keeping both the fs-wide pinned_extents tree and the per-block-
+ * group / fs-wide pinned byte counters in sync.
+ *
+ * @root:   any btrfs root; only used to reach root->fs_info
+ * @bytenr: start of the range, in bytes
+ * @num:    length of the range, in bytes
+ * @pin:    non-zero to pin (set dirty bits, increment counters),
+ *          zero to unpin (clear dirty bits, decrement counters)
+ *
+ * Always returns 0.
+ */
+static int update_pinned_extents(struct btrfs_root *root,
+ u64 bytenr, u64 num, int pin)
+{
+ u64 len;
+ struct btrfs_block_group_cache *cache;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
+ /* Record (or drop) the whole range in the global pinned_extents tree. */
+ if (pin) {
+ set_extent_dirty(&fs_info->pinned_extents,
+ bytenr, bytenr + num - 1, GFP_NOFS);
+ } else {
+ clear_extent_dirty(&fs_info->pinned_extents,
+ bytenr, bytenr + num - 1, GFP_NOFS);
+ }
+ /* Walk the range one block group at a time, adjusting counters. */
+ while (num > 0) {
+ cache = btrfs_lookup_block_group(fs_info, bytenr);
+ /*
+ * NOTE(review): WARN_ON does not stop execution; if the lookup
+ * fails, the cache->key dereference below is a NULL deref.
+ * Presumably callers only pass ranges covered by block groups
+ * — confirm.
+ */
+ WARN_ON(!cache);
+ /* Portion of the remaining range that lies in this block group. */
+ len = min(num, cache->key.offset -
+ (bytenr - cache->key.objectid));
+ if (pin) {
+ cache->pinned += len;
+ fs_info->total_pinned += len;
+ } else {
+ cache->pinned -= len;
+ fs_info->total_pinned -= len;
+ }
+ bytenr += len;
+ num -= len;
+ }
+ return 0;
+}
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_map_tree *copy)
{
u64 start;
u64 end;
int ret;
- struct extent_map_tree *pinned_extents = &root->fs_info->pinned_extents;
struct extent_map_tree *free_space_cache;
-
free_space_cache = &root->fs_info->free_space_cache;
while(1) {
EXTENT_DIRTY);
if (ret)
break;
-
- clear_extent_dirty(pinned_extents, start, end,
- GFP_NOFS);
+ update_pinned_extents(root, start, end + 1 - start, 0);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
}
root->fs_info->running_transaction->transid;
if (btrfs_header_generation(buf) == transid) {
free_extent_buffer(buf);
- return 0;
+ return 1;
}
}
free_extent_buffer(buf);
}
- set_extent_dirty(&root->fs_info->pinned_extents,
- bytenr, bytenr + num_bytes - 1, GFP_NOFS);
+ update_pinned_extents(root, bytenr, num_bytes, 1);
} else {
set_extent_bits(&root->fs_info->pending_del,
bytenr, bytenr + num_bytes - 1,
if (pin) {
ret = pin_down_bytes(root, bytenr, num_bytes, 0);
- BUG_ON(ret);
+ if (ret > 0)
+ mark_free = 1;
+ BUG_ON(ret < 0);
}
/* block accounting for super block */
EXTENT_LOCKED);
if (ret)
break;
-
- set_extent_dirty(pinned_extents, start, end, GFP_NOFS);
+ update_pinned_extents(extent_root, start, end + 1 - start, 1);
clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
GFP_NOFS);
ret = __free_extent(trans, extent_root,
struct btrfs_block_group_cache *block_group;
int full_scan = 0;
int wrapped = 0;
+ u64 cached_start;
WARN_ON(num_bytes < root->sectorsize);
btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
level = btrfs_header_level(root->node);
+ if (num_bytes >= 96 * 1024 * 1024 && hint_byte) {
+ data = BTRFS_BLOCK_GROUP_MIXED;
+ }
+
if (search_end == (u64)-1)
search_end = btrfs_super_total_bytes(&info->super_copy);
if (hint_byte) {
check_failed:
search_start = find_search_start(root, &block_group,
search_start, total_needed, data);
+ cached_start = search_start;
btrfs_init_path(path);
ins->objectid = search_start;
continue;
if (ret < 0)
goto error;
+
+ search_start = max(search_start,
+ block_group->key.objectid);
if (!start_found) {
ins->objectid = search_start;
ins->offset = search_end - search_start;
ins->objectid = last_byte > search_start ?
last_byte : search_start;
ins->offset = search_end - ins->objectid;
+ BUG_ON(ins->objectid >= search_end);
goto check_pending;
}
btrfs_item_key_to_cpu(l, &key, slot);
start_found = 1;
last_byte = key.objectid + key.offset;
- if (!full_scan && last_byte >= block_group->key.objectid +
+ if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
+ last_byte >= block_group->key.objectid +
block_group->key.offset) {
btrfs_release_path(root, path);
search_start = block_group->key.objectid +
- block_group->key.offset * 2;
+ block_group->key.offset;
goto new_group;
}
next:
if (ins->objectid + num_bytes >= search_end)
goto enospc;
+ if (!full_scan && data != BTRFS_BLOCK_GROUP_MIXED &&
+ ins->objectid + num_bytes >= block_group->
+ key.objectid + block_group->key.offset) {
+ search_start = block_group->key.objectid +
+ block_group->key.offset;
+ goto new_group;
+ }
if (test_range_bit(&info->extent_ins, ins->objectid,
ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
search_start = ins->objectid + num_bytes;
set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
ins->objectid + ins->offset - 1,
EXTENT_LOCKED, GFP_NOFS);
+ WARN_ON(data == 1);
goto update_block;
}
return ERR_PTR(-ENOMEM);
}
btrfs_set_buffer_uptodate(buf);
- buf->alloc_addr = (unsigned long)__builtin_return_address(0);
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
buf->start + buf->len - 1, GFP_NOFS);
- /*
- set_buffer_checked(buf);
- set_buffer_defrag(buf);
- */
- /* FIXME!!!!!!!!!!!!!!!!
- set_radix_bit(&trans->transaction->dirty_pages, buf->pages[0]->index);
- */
+ set_extent_bits(&BTRFS_I(root->fs_info->btree_inode)->extent_tree,
+ buf->start, buf->start + buf->len - 1,
+ EXTENT_CSUM, GFP_NOFS);
+ buf->flags |= EXTENT_CSUM;
+ btrfs_set_buffer_defrag(buf);
trans->blocks_used++;
return buf;
}
for (i = 0; i <= orig_level; i++) {
if (path->nodes[i]) {
free_extent_buffer(path->nodes[i]);
- path->nodes[i] = 0;
+ path->nodes[i] = NULL;
}
}
out:
{
u64 start;
u64 end;
+ u64 ptr;
int ret;
-
while(1) {
ret = find_first_extent_bit(&info->block_group_cache, 0,
&start, &end, (unsigned int)-1);
if (ret)
break;
+ ret = get_state_private(&info->block_group_cache, start, &ptr);
+ if (!ret)
+ kfree((void *)(unsigned long)ptr);
clear_extent_bits(&info->block_group_cache, start,
end, (unsigned int)-1, GFP_NOFS);
}
sizeof(cache->item));
memcpy(&cache->key, &found_key, sizeof(found_key));
cache->cached = 0;
-
+ cache->pinned = 0;
key.objectid = found_key.objectid + found_key.offset;
btrfs_release_path(root, path);
- if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
+ if (cache->item.flags & BTRFS_BLOCK_GROUP_MIXED) {
+ bit = BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA;
+ cache->data = BTRFS_BLOCK_GROUP_MIXED;
+ } else if (cache->item.flags & BTRFS_BLOCK_GROUP_DATA) {
bit = BLOCK_GROUP_DATA;
- cache->data = 1;
+ cache->data = BTRFS_BLOCK_GROUP_DATA;
} else {
bit = BLOCK_GROUP_METADATA;
cache->data = 0;
found_key.objectid + found_key.offset - 1,
bit | EXTENT_LOCKED, GFP_NOFS);
set_state_private(block_group_cache, found_key.objectid,
- (u64)cache);
+ (unsigned long)cache);
if (key.objectid >=
btrfs_super_total_bytes(&info->super_copy))