X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=fs%2Fbtrfs%2Fctree.c;h=40f0e0cb804bd07f84f40d51d33dcfa7377f687f;hb=a74a4b97b61beede185b4b3ad359d7d378b0d312;hp=70b6ddfe15a18d172cb8e6e5a109cc1c550ab314;hpb=0b86a832a1f38abec695864ec2eaedc9d2383f1b;p=deliverable%2Flinux.git

diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 70b6ddfe15a1..40f0e0cb804b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -21,6 +21,7 @@
 #include "disk-io.h"
 #include "transaction.h"
 #include "print-tree.h"
+#include "locking.h"
 
 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
 		      *root, struct btrfs_path *path, int level);
@@ -29,7 +30,7 @@ static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
 		      struct btrfs_path *path, int data_size, int extend);
 static int push_node_left(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, struct extent_buffer *dst,
-			  struct extent_buffer *src);
+			  struct extent_buffer *src, int empty);
 static int balance_node_right(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      struct extent_buffer *dst_buf,
@@ -62,12 +63,52 @@ void btrfs_free_path(struct btrfs_path *p)
 void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
 {
 	int i;
+	int keep = p->keep_locks;
+	int skip = p->skip_locking;
+
 	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
 		if (!p->nodes[i])
-			break;
+			continue;
+		if (p->locks[i]) {
+			btrfs_tree_unlock(p->nodes[i]);
+			p->locks[i] = 0;
+		}
 		free_extent_buffer(p->nodes[i]);
 	}
 	memset(p, 0, sizeof(*p));
+	p->keep_locks = keep;
+	p->skip_locking = skip;
+}
+
+struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
+{
+	struct extent_buffer *eb;
+	spin_lock(&root->node_lock);
+	eb = root->node;
+	extent_buffer_get(eb);
+	spin_unlock(&root->node_lock);
+	return eb;
+}
+
+struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
+{
+	struct extent_buffer *eb;
+
+	while(1) {
+		eb = btrfs_root_node(root);
+		btrfs_tree_lock(eb);
+
+		spin_lock(&root->node_lock);
+		if (eb == root->node) {
+			spin_unlock(&root->node_lock);
+			break;
+		}
+		spin_unlock(&root->node_lock);
+
+		btrfs_tree_unlock(eb);
+		free_extent_buffer(eb);
+	}
+	return eb;
 }
 
 static void add_root_to_dirty_list(struct btrfs_root *root)
@@ -111,7 +152,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	} else {
 		first_key.objectid = 0;
 	}
-	cow = __btrfs_alloc_free_block(trans, new_root, buf->len,
+	cow = btrfs_alloc_free_block(trans, new_root, buf->len,
 				      new_root_objectid, trans->transid,
 				      first_key.objectid, level,
 				      buf->start, 0);
@@ -124,6 +165,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 	btrfs_set_header_bytenr(cow, cow->start);
 	btrfs_set_header_generation(cow, trans->transid);
 	btrfs_set_header_owner(cow, new_root_objectid);
+	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN);
 
 	WARN_ON(btrfs_header_generation(buf) > trans->transid);
 	ret = btrfs_inc_ref(trans, new_root, buf);
@@ -150,14 +192,19 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	int ret = 0;
 	int different_trans = 0;
 	int level;
+	int unlock_orig = 0;
 	struct btrfs_key first_key;
 
+	if (*cow_ret == buf)
+		unlock_orig = 1;
+
+	WARN_ON(!btrfs_tree_locked(buf));
+
 	if (root->ref_cows) {
 		root_gen = trans->transid;
 	} else {
 		root_gen = 0;
 	}
-
 	WARN_ON(root->ref_cows && trans->transid !=
 		root->fs_info->running_transaction->transid);
 	WARN_ON(root->ref_cows && trans->transid != root->last_trans);
@@ -172,7 +219,7 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 	} else {
 		first_key.objectid = 0;
 	}
-	cow = __btrfs_alloc_free_block(trans, root, buf->len,
+ cow = btrfs_alloc_free_block(trans, root, buf->len, root->root_key.objectid, root_gen, first_key.objectid, level, search_start, empty_size); @@ -183,6 +230,7 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_set_header_bytenr(cow, cow->start); btrfs_set_header_generation(cow, trans->transid); btrfs_set_header_owner(cow, root->root_key.objectid); + btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN); WARN_ON(btrfs_header_generation(buf) > trans->transid); if (btrfs_header_generation(buf) != trans->transid) { @@ -195,9 +243,14 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, } if (buf == root->node) { + WARN_ON(parent && parent != buf); root_gen = btrfs_header_generation(buf); + + spin_lock(&root->node_lock); root->node = cow; extent_buffer_get(cow); + spin_unlock(&root->node_lock); + if (buf != root->commit_root) { btrfs_free_extent(trans, root, buf->start, buf->len, root->root_key.objectid, @@ -218,6 +271,8 @@ int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_header_owner(parent), root_gen, 0, 0, 1); } + if (unlock_orig) + btrfs_tree_unlock(buf); free_extent_buffer(buf); btrfs_mark_buffer_dirty(cow); *cow_ret = cow; @@ -245,11 +300,14 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans, } header_trans = btrfs_header_generation(buf); - if (header_trans == trans->transid) { + spin_lock(&root->fs_info->hash_lock); + if (header_trans == trans->transid && + !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { *cow_ret = buf; + spin_unlock(&root->fs_info->hash_lock); return 0; } - + spin_unlock(&root->fs_info->hash_lock); search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1); ret = __btrfs_cow_block(trans, root, buf, parent, parent_slot, cow_ret, search_start, 0); @@ -298,6 +356,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, struct extent_buffer *cur; struct extent_buffer *tmp; u64 blocknr; + u64 gen; u64 search_start = *last_ret; u64 last_block = 0; u64 other; @@ -311,6 +370,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, int progress_passed = 0; struct btrfs_disk_key disk_key; + /* FIXME this code needs locking */ + return 0; + parent_level = btrfs_header_level(parent); if (cache_only && parent_level != 1) return 0; @@ -350,6 +412,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, progress_passed = 1; blocknr = btrfs_node_blockptr(parent, i); + gen = btrfs_node_ptr_generation(parent, i); if (last_block == 0) last_block = blocknr; @@ -357,7 +420,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, other = btrfs_node_blockptr(parent, i - 1); close = close_blocks(blocknr, other, blocksize); } - if (close && i < end_slot - 2) { + if (!close && i < end_slot - 2) { other = btrfs_node_blockptr(parent, i + 1); close = close_blocks(blocknr, other, blocksize); } @@ -373,7 +436,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, cur = btrfs_find_tree_block(root, blocknr, blocksize); if (cur) - uptodate = btrfs_buffer_uptodate(cur); + uptodate = btrfs_buffer_uptodate(cur, gen); else uptodate = 0; if (!cur || !uptodate) { @@ -383,9 +446,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, } if (!cur) { cur = read_tree_block(root, blocknr, - blocksize); + blocksize, gen); } else if (!uptodate) { - btrfs_read_buffer(cur); + btrfs_read_buffer(cur, gen); } } if (search_start == 0) @@ -564,7 +627,17 @@ static int check_leaf(struct btrfs_root *root, struct btrfs_path *path, static int noinline check_block(struct btrfs_root *root, struct btrfs_path *path, int level) { + u64 found_start; 
return 0; + if (btrfs_header_level(path->nodes[level]) != level) + printk("warning: bad level %Lu wanted %d found %d\n", + path->nodes[level]->start, level, + btrfs_header_level(path->nodes[level])); + found_start = btrfs_header_bytenr(path->nodes[level]); + if (found_start != path->nodes[level]->start) { + printk("warning: bad bytentr %Lu found %Lu\n", + path->nodes[level]->start, found_start); + } #if 0 struct extent_buffer *buf = path->nodes[level]; @@ -681,12 +754,17 @@ static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, static struct extent_buffer *read_node_slot(struct btrfs_root *root, struct extent_buffer *parent, int slot) { + int level = btrfs_header_level(parent); if (slot < 0) return NULL; if (slot >= btrfs_header_nritems(parent)) return NULL; + + BUG_ON(level == 0); + return read_tree_block(root, btrfs_node_blockptr(parent, slot), - btrfs_level_size(root, btrfs_header_level(parent) - 1)); + btrfs_level_size(root, level - 1), + btrfs_node_ptr_generation(parent, slot)); } static int balance_level(struct btrfs_trans_handle *trans, @@ -708,6 +786,7 @@ static int balance_level(struct btrfs_trans_handle *trans, return 0; mid = path->nodes[level]; + WARN_ON(!path->locks[level]); WARN_ON(btrfs_header_generation(mid) != trans->transid); orig_ptr = btrfs_node_blockptr(mid, orig_slot); @@ -728,15 +807,21 @@ static int balance_level(struct btrfs_trans_handle *trans, /* promote the child to a root */ child = read_node_slot(root, mid, 0); + btrfs_tree_lock(child); BUG_ON(!child); ret = btrfs_cow_block(trans, root, child, mid, 0, &child); BUG_ON(ret); + spin_lock(&root->node_lock); root->node = child; + spin_unlock(&root->node_lock); + add_root_to_dirty_list(root); + btrfs_tree_unlock(child); + path->locks[level] = 0; path->nodes[level] = NULL; clean_tree_block(trans, root, mid); - wait_on_tree_block_writeback(root, mid); + btrfs_tree_unlock(mid); /* once for the path */ free_extent_buffer(mid); ret = btrfs_free_extent(trans, root, mid->start, mid->len, @@ -755,6 +840,7 @@ static int balance_level(struct btrfs_trans_handle *trans, left = read_node_slot(root, parent, pslot - 1); if (left) { + btrfs_tree_lock(left); wret = btrfs_cow_block(trans, root, left, parent, pslot - 1, &left); if (wret) { @@ -764,6 +850,7 @@ static int balance_level(struct btrfs_trans_handle *trans, } right = read_node_slot(root, parent, pslot + 1); if (right) { + btrfs_tree_lock(right); wret = btrfs_cow_block(trans, root, right, parent, pslot + 1, &right); if (wret) { @@ -775,7 +862,7 @@ static int balance_level(struct btrfs_trans_handle *trans, /* first, try to make some room in the middle buffer */ if (left) { orig_slot += btrfs_header_nritems(left); - wret = push_node_left(trans, root, left, mid); + wret = push_node_left(trans, root, left, mid, 1); if (wret < 0) ret = wret; if (btrfs_header_nritems(mid) < 2) @@ -786,7 +873,7 @@ static int balance_level(struct btrfs_trans_handle *trans, * then try to empty the right most buffer into the middle */ if (right) { - wret = push_node_left(trans, root, mid, right); + wret = push_node_left(trans, root, mid, right, 1); if (wret < 0 && wret != -ENOSPC) ret = wret; if (btrfs_header_nritems(right) == 0) { @@ -795,7 +882,7 @@ static int balance_level(struct btrfs_trans_handle *trans, u32 blocksize = right->len; clean_tree_block(trans, root, right); - wait_on_tree_block_writeback(root, right); + btrfs_tree_unlock(right); free_extent_buffer(right); right = NULL; wret = del_ptr(trans, root, path, level + 1, pslot + @@ -831,6 +918,11 @@ static int 
balance_level(struct btrfs_trans_handle *trans, ret = wret; goto enospc; } + if (wret == 1) { + wret = push_node_left(trans, root, left, mid, 1); + if (wret < 0) + ret = wret; + } BUG_ON(wret == 1); } if (btrfs_header_nritems(mid) == 0) { @@ -838,8 +930,9 @@ static int balance_level(struct btrfs_trans_handle *trans, u64 root_gen = btrfs_header_generation(parent); u64 bytenr = mid->start; u32 blocksize = mid->len; + clean_tree_block(trans, root, mid); - wait_on_tree_block_writeback(root, mid); + btrfs_tree_unlock(mid); free_extent_buffer(mid); mid = NULL; wret = del_ptr(trans, root, path, level + 1, pslot); @@ -862,11 +955,14 @@ static int balance_level(struct btrfs_trans_handle *trans, if (left) { if (btrfs_header_nritems(left) > orig_slot) { extent_buffer_get(left); + /* left was locked after cow */ path->nodes[level] = left; path->slots[level + 1] -= 1; path->slots[level] = orig_slot; - if (mid) + if (mid) { + btrfs_tree_unlock(mid); free_extent_buffer(mid); + } } else { orig_slot -= btrfs_header_nritems(left); path->slots[level] = orig_slot; @@ -878,10 +974,15 @@ static int balance_level(struct btrfs_trans_handle *trans, btrfs_node_blockptr(path->nodes[level], path->slots[level])) BUG(); enospc: - if (right) + if (right) { + btrfs_tree_unlock(right); free_extent_buffer(right); - if (left) + } + if (left) { + if (path->nodes[level] != left) + btrfs_tree_unlock(left); free_extent_buffer(left); + } return ret; } @@ -919,6 +1020,8 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, /* first, try to make some room in the middle buffer */ if (left) { u32 left_nr; + + btrfs_tree_lock(left); left_nr = btrfs_header_nritems(left); if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { wret = 1; @@ -929,7 +1032,7 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, wret = 1; else { wret = push_node_left(trans, root, - left, mid); + left, mid, 0); } } if (wret < 0) @@ -944,24 +1047,28 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, path->nodes[level] = left; path->slots[level + 1] -= 1; path->slots[level] = orig_slot; + btrfs_tree_unlock(mid); free_extent_buffer(mid); } else { orig_slot -= btrfs_header_nritems(left); path->slots[level] = orig_slot; + btrfs_tree_unlock(left); free_extent_buffer(left); } return 0; } + btrfs_tree_unlock(left); free_extent_buffer(left); } - right= read_node_slot(root, parent, pslot + 1); + right = read_node_slot(root, parent, pslot + 1); /* * then try to empty the right most buffer into the middle */ if (right) { u32 right_nr; + btrfs_tree_lock(right); right_nr = btrfs_header_nritems(right); if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { wret = 1; @@ -990,12 +1097,15 @@ static int noinline push_nodes_for_insert(struct btrfs_trans_handle *trans, path->slots[level + 1] += 1; path->slots[level] = orig_slot - btrfs_header_nritems(mid); + btrfs_tree_unlock(mid); free_extent_buffer(mid); } else { + btrfs_tree_unlock(right); free_extent_buffer(right); } return 0; } + btrfs_tree_unlock(right); free_extent_buffer(right); } return 1; @@ -1027,6 +1137,7 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, return; node = path->nodes[level]; + search = btrfs_node_blockptr(node, slot); blocksize = btrfs_level_size(root, level - 1); eb = btrfs_find_tree_block(root, search, blocksize); @@ -1059,7 +1170,8 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, if ((search >= lowest_read && search <= highest_read) || (search < lowest_read && 
lowest_read - search <= 32768) || (search > highest_read && search - highest_read <= 32768)) { - readahead_tree_block(root, search, blocksize); + readahead_tree_block(root, search, blocksize, + btrfs_node_ptr_generation(node, nr)); nread += blocksize; } nscan++; @@ -1074,6 +1186,43 @@ static void reada_for_search(struct btrfs_root *root, struct btrfs_path *path, highest_read = search; } } + +static void unlock_up(struct btrfs_path *path, int level, int lowest_unlock) +{ + int i; + int skip_level = level; + int no_skips = 0; + struct extent_buffer *t; + + for (i = level; i < BTRFS_MAX_LEVEL; i++) { + if (!path->nodes[i]) + break; + if (!path->locks[i]) + break; + if (!no_skips && path->slots[i] == 0) { + skip_level = i + 1; + continue; + } + if (!no_skips && path->keep_locks) { + u32 nritems; + t = path->nodes[i]; + nritems = btrfs_header_nritems(t); + if (nritems < 1 || path->slots[i] >= nritems - 1) { + skip_level = i + 1; + continue; + } + } + if (skip_level < i && i >= lowest_unlock) + no_skips = 1; + + t = path->nodes[i]; + if (i >= lowest_unlock && i > skip_level && path->locks[i]) { + btrfs_tree_unlock(t); + path->locks[i] = 0; + } + } +} + /* * look for key in the tree. path is filled in with nodes along the way * if key is found, we return zero and you can find the item in the leaf @@ -1092,21 +1241,34 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root ins_len, int cow) { struct extent_buffer *b; - u64 bytenr; - u64 ptr_gen; + struct extent_buffer *tmp; int slot; int ret; int level; int should_reada = p->reada; + int lowest_unlock = 1; + int blocksize; u8 lowest_level = 0; + u64 blocknr; + u64 gen; lowest_level = p->lowest_level; WARN_ON(lowest_level && ins_len); WARN_ON(p->nodes[0] != NULL); - WARN_ON(!mutex_is_locked(&root->fs_info->fs_mutex)); + WARN_ON(cow && root == root->fs_info->extent_root && + !mutex_is_locked(&root->fs_info->alloc_mutex)); + WARN_ON(root == root->fs_info->chunk_root && + !mutex_is_locked(&root->fs_info->chunk_mutex)); + WARN_ON(root == root->fs_info->dev_root && + !mutex_is_locked(&root->fs_info->chunk_mutex)); + if (ins_len < 0) + lowest_unlock = 2; again: - b = root->node; - extent_buffer_get(b); + if (p->skip_locking) + b = btrfs_root_node(root); + else + b = btrfs_lock_root_node(root); + while (b) { level = btrfs_header_level(b); if (cow) { @@ -1125,16 +1287,19 @@ again: WARN_ON(1); level = btrfs_header_level(b); p->nodes[level] = b; + if (!p->skip_locking) + p->locks[level] = 1; ret = check_block(root, p, level); if (ret) return -1; + ret = bin_search(b, key, level, &slot); if (level != 0) { if (ret && slot > 0) slot -= 1; p->slots[level] = slot; if (ins_len > 0 && btrfs_header_nritems(b) >= - BTRFS_NODEPTRS_PER_BLOCK(root) - 1) { + BTRFS_NODEPTRS_PER_BLOCK(root) - 3) { int sret = split_node(trans, root, p, level); BUG_ON(sret > 0); if (sret) @@ -1155,22 +1320,46 @@ again: BUG_ON(btrfs_header_nritems(b) == 1); } /* this is only true while dropping a snapshot */ - if (level == lowest_level) + if (level == lowest_level) { + unlock_up(p, level, lowest_unlock); break; - bytenr = btrfs_node_blockptr(b, slot); - ptr_gen = btrfs_node_ptr_generation(b, slot); + } + if (should_reada) reada_for_search(root, p, level, slot, key->objectid); - b = read_tree_block(root, bytenr, - btrfs_level_size(root, level - 1)); - if (ptr_gen != btrfs_header_generation(b)) { - printk("block %llu bad gen wanted %llu " - "found %llu\n", - (unsigned long long)b->start, - (unsigned long long)ptr_gen, - (unsigned long long)btrfs_header_generation(b)); + + 
blocknr = btrfs_node_blockptr(b, slot); + gen = btrfs_node_ptr_generation(b, slot); + blocksize = btrfs_level_size(root, level - 1); + + tmp = btrfs_find_tree_block(root, blocknr, blocksize); + if (tmp && btrfs_buffer_uptodate(tmp, gen)) { + b = tmp; + } else { + /* + * reduce lock contention at high levels + * of the btree by dropping locks before + * we read. + */ + if (level > 1) { + btrfs_release_path(NULL, p); + if (tmp) + free_extent_buffer(tmp); + tmp = read_tree_block(root, blocknr, + blocksize, gen); + if (tmp) + free_extent_buffer(tmp); + goto again; + } else { + if (tmp) + free_extent_buffer(tmp); + b = read_node_slot(root, b, slot); + } } + if (!p->skip_locking) + btrfs_tree_lock(b); + unlock_up(p, level, lowest_unlock); } else { p->slots[level] = slot; if (ins_len > 0 && btrfs_leaf_free_space(root, b) < @@ -1181,6 +1370,7 @@ again: if (sret) return sret; } + unlock_up(p, level, lowest_unlock); return ret; } } @@ -1227,7 +1417,7 @@ static int fixup_low_keys(struct btrfs_trans_handle *trans, */ static int push_node_left(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *dst, - struct extent_buffer *src) + struct extent_buffer *src, int empty) { int push_items = 0; int src_nritems; @@ -1240,12 +1430,27 @@ static int push_node_left(struct btrfs_trans_handle *trans, WARN_ON(btrfs_header_generation(src) != trans->transid); WARN_ON(btrfs_header_generation(dst) != trans->transid); + if (!empty && src_nritems <= 8) + return 1; + if (push_items <= 0) { return 1; } - if (src_nritems < push_items) - push_items = src_nritems; + if (empty) { + push_items = min(src_nritems, push_items); + if (push_items < src_nritems) { + /* leave at least 8 pointers in the node if + * we aren't going to empty it + */ + if (src_nritems - push_items < 8) { + if (push_items <= 8) + return 1; + push_items -= 8; + } + } + } else + push_items = min(src_nritems - 8, push_items); copy_extent_buffer(dst, src, btrfs_node_key_ptr_offset(dst_nritems), @@ -1291,13 +1496,19 @@ static int balance_node_right(struct btrfs_trans_handle *trans, src_nritems = btrfs_header_nritems(src); dst_nritems = btrfs_header_nritems(dst); push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems; - if (push_items <= 0) + if (push_items <= 0) { + return 1; + } + + if (src_nritems < 4) { return 1; + } max_push = src_nritems / 2 + 1; /* don't try to empty the node */ - if (max_push >= src_nritems) + if (max_push >= src_nritems) { return 1; + } if (max_push < push_items) push_items = max_push; @@ -1335,6 +1546,7 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, u64 lower_gen; struct extent_buffer *lower; struct extent_buffer *c; + struct extent_buffer *old; struct btrfs_disk_key lower_key; BUG_ON(path->nodes[level]); @@ -1351,12 +1563,13 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, else btrfs_node_key(lower, &lower_key, 0); - c = __btrfs_alloc_free_block(trans, root, root->nodesize, + c = btrfs_alloc_free_block(trans, root, root->nodesize, root->root_key.objectid, root_gen, lower_key.objectid, level, root->node->start, 0); if (IS_ERR(c)) return PTR_ERR(c); + memset_extent_buffer(c, 0, 0, root->nodesize); btrfs_set_header_nritems(c, 1); btrfs_set_header_level(c, level); @@ -1367,6 +1580,11 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, write_extent_buffer(c, root->fs_info->fsid, (unsigned long)btrfs_header_fsid(c), BTRFS_FSID_SIZE); + + write_extent_buffer(c, root->fs_info->chunk_tree_uuid, + (unsigned long)btrfs_header_chunk_tree_uuid(c), 
+ BTRFS_UUID_SIZE); + btrfs_set_node_key(c, &lower_key, 0); btrfs_set_node_blockptr(c, 0, lower->start); lower_gen = btrfs_header_generation(lower); @@ -1376,23 +1594,31 @@ static int noinline insert_new_root(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(c); - /* the super has an extra ref to root->node */ - free_extent_buffer(root->node); + spin_lock(&root->node_lock); + old = root->node; root->node = c; + spin_unlock(&root->node_lock); + + /* the super has an extra ref to root->node */ + free_extent_buffer(old); + add_root_to_dirty_list(root); extent_buffer_get(c); path->nodes[level] = c; + path->locks[level] = 1; path->slots[level] = 0; if (root->ref_cows && lower_gen != trans->transid) { struct btrfs_path *back_path = btrfs_alloc_path(); int ret; + mutex_lock(&root->fs_info->alloc_mutex); ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root, path, lower->start, root->root_key.objectid, trans->transid, 0, 0); BUG_ON(ret); + mutex_unlock(&root->fs_info->alloc_mutex); btrfs_free_path(back_path); } return 0; @@ -1468,7 +1694,7 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root ret = push_nodes_for_insert(trans, root, path, level); c = path->nodes[level]; if (!ret && btrfs_header_nritems(c) < - BTRFS_NODEPTRS_PER_BLOCK(root) - 1) + BTRFS_NODEPTRS_PER_BLOCK(root) - 3) return 0; if (ret < 0) return ret; @@ -1481,7 +1707,7 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root root_gen = 0; btrfs_node_key(c, &disk_key, 0); - split = __btrfs_alloc_free_block(trans, root, root->nodesize, + split = btrfs_alloc_free_block(trans, root, root->nodesize, root->root_key.objectid, root_gen, btrfs_disk_key_objectid(&disk_key), @@ -1494,9 +1720,13 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root btrfs_set_header_bytenr(split, split->start); btrfs_set_header_generation(split, trans->transid); btrfs_set_header_owner(split, root->root_key.objectid); + btrfs_set_header_flags(split, 0); write_extent_buffer(split, root->fs_info->fsid, (unsigned long)btrfs_header_fsid(split), BTRFS_FSID_SIZE); + write_extent_buffer(split, root->fs_info->chunk_tree_uuid, + (unsigned long)btrfs_header_chunk_tree_uuid(split), + BTRFS_UUID_SIZE); mid = (c_nritems + 1) / 2; @@ -1520,10 +1750,12 @@ static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root if (path->slots[level] >= mid) { path->slots[level] -= mid; + btrfs_tree_unlock(c); free_extent_buffer(c); path->nodes[level] = split; path->slots[level + 1] += 1; } else { + btrfs_tree_unlock(split); free_extent_buffer(split); } return ret; @@ -1603,32 +1835,27 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root if (slot >= btrfs_header_nritems(upper) - 1) return 1; - right = read_tree_block(root, btrfs_node_blockptr(upper, slot + 1), - root->leafsize); + WARN_ON(!btrfs_tree_locked(path->nodes[1])); + + right = read_node_slot(root, upper, slot + 1); + btrfs_tree_lock(right); free_space = btrfs_leaf_free_space(root, right); - if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(right); - return 1; - } + if (free_space < data_size + sizeof(struct btrfs_item)) + goto out_unlock; /* cow and double check */ ret = btrfs_cow_block(trans, root, right, upper, slot + 1, &right); - if (ret) { - free_extent_buffer(right); - return 1; - } + if (ret) + goto out_unlock; + free_space = btrfs_leaf_free_space(root, right); - if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(right); - return 1; - } + 
if (free_space < data_size + sizeof(struct btrfs_item)) + goto out_unlock; left_nritems = btrfs_header_nritems(left); - if (left_nritems == 0) { - free_extent_buffer(right); - return 1; - } + if (left_nritems == 0) + goto out_unlock; if (empty) nr = 0; @@ -1664,10 +1891,8 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root left->map_token = NULL; } - if (push_items == 0) { - free_extent_buffer(right); - return 1; - } + if (push_items == 0) + goto out_unlock; if (!empty && push_items == left_nritems) WARN_ON(1); @@ -1735,14 +1960,24 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root /* then fixup the leaf pointer in the path */ if (path->slots[0] >= left_nritems) { path->slots[0] -= left_nritems; + if (btrfs_header_nritems(path->nodes[0]) == 0) + clean_tree_block(trans, root, path->nodes[0]); + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[1] += 1; } else { + btrfs_tree_unlock(right); free_extent_buffer(right); } return 0; + +out_unlock: + btrfs_tree_unlock(right); + free_extent_buffer(right); + return 1; } + /* * push some data in the path leaf to the left, trying to free up at * least data_size bytes. returns zero if the push worked, nonzero otherwise @@ -1779,12 +2014,14 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root return 1; } - left = read_tree_block(root, btrfs_node_blockptr(path->nodes[1], - slot - 1), root->leafsize); + WARN_ON(!btrfs_tree_locked(path->nodes[1])); + + left = read_node_slot(root, path->nodes[1], slot - 1); + btrfs_tree_lock(left); free_space = btrfs_leaf_free_space(root, left); if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } /* cow and double check */ @@ -1792,14 +2029,14 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root path->nodes[1], slot - 1, &left); if (ret) { /* we hit -ENOSPC, but it isn't fatal here */ - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } free_space = btrfs_leaf_free_space(root, left); if (free_space < data_size + sizeof(struct btrfs_item)) { - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } if (empty) @@ -1834,8 +2071,8 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root } if (push_items == 0) { - free_extent_buffer(left); - return 1; + ret = 1; + goto out; } if (!empty && push_items == btrfs_header_nritems(right)) WARN_ON(1); @@ -1933,15 +2170,23 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root /* then fixup the leaf pointer in the path */ if (path->slots[0] < push_items) { path->slots[0] += old_left_nritems; + if (btrfs_header_nritems(path->nodes[0]) == 0) + clean_tree_block(trans, root, path->nodes[0]); + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = left; path->slots[1] -= 1; } else { + btrfs_tree_unlock(left); free_extent_buffer(left); path->slots[0] -= push_items; } BUG_ON(path->slots[0] < 0); return ret; +out: + btrfs_tree_unlock(left); + free_extent_buffer(left); + return ret; } /* @@ -2010,12 +2255,14 @@ again: btrfs_item_key(l, &disk_key, 0); - right = __btrfs_alloc_free_block(trans, root, root->leafsize, + right = btrfs_alloc_free_block(trans, root, root->leafsize, root->root_key.objectid, root_gen, disk_key.objectid, 0, l->start, 0); - if (IS_ERR(right)) + if (IS_ERR(right)) { + BUG_ON(1); return PTR_ERR(right); + } memset_extent_buffer(right, 
0, 0, sizeof(struct btrfs_header)); btrfs_set_header_bytenr(right, right->start); @@ -2025,6 +2272,10 @@ again: write_extent_buffer(right, root->fs_info->fsid, (unsigned long)btrfs_header_fsid(right), BTRFS_FSID_SIZE); + + write_extent_buffer(right, root->fs_info->chunk_tree_uuid, + (unsigned long)btrfs_header_chunk_tree_uuid(right), + BTRFS_UUID_SIZE); if (mid <= slot) { if (nritems == 1 || leaf_space_used(l, mid, nritems - mid) + space_needed > @@ -2037,10 +2288,13 @@ again: path->slots[1] + 1, 1); if (wret) ret = wret; + + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] = 0; path->slots[1] += 1; + btrfs_mark_buffer_dirty(right); return ret; } mid = slot; @@ -2062,6 +2316,7 @@ again: path->slots[1], 1); if (wret) ret = wret; + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] = 0; @@ -2071,6 +2326,7 @@ again: if (wret) ret = wret; } + btrfs_mark_buffer_dirty(right); return ret; } else if (extend && slot == 0) { mid = 1; @@ -2134,12 +2390,15 @@ again: BUG_ON(path->slots[0] != slot); if (mid <= slot) { + btrfs_tree_unlock(path->nodes[0]); free_extent_buffer(path->nodes[0]); path->nodes[0] = right; path->slots[0] -= mid; path->slots[1] += 1; - } else + } else { + btrfs_tree_unlock(right); free_extent_buffer(right); + } BUG_ON(path->slots[0] < 0); @@ -2368,10 +2627,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, total_data += data_size[i]; } - /* create a root if there isn't one */ - if (!root->node) - BUG(); - total_size = total_data + (nr - 1) * sizeof(struct btrfs_item); ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1); if (ret == 0) { @@ -2466,7 +2721,6 @@ int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, btrfs_print_leaf(root, leaf); BUG(); } - out: return ret; } @@ -2605,8 +2859,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, btrfs_set_header_level(leaf, 0); } else { u64 root_gen = btrfs_header_generation(path->nodes[1]); - clean_tree_block(trans, root, leaf); - wait_on_tree_block_writeback(root, leaf); wret = del_ptr(trans, root, path, 1, path->slots[1]); if (wret) ret = wret; @@ -2657,9 +2909,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, root_gen = btrfs_header_generation( path->nodes[1]); - clean_tree_block(trans, root, leaf); - wait_on_tree_block_writeback(root, leaf); - wret = del_ptr(trans, root, path, 1, slot); if (wret) ret = wret; @@ -2672,7 +2921,13 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (wret) ret = wret; } else { - btrfs_mark_buffer_dirty(leaf); + /* if we're still in the path, make sure + * we're dirty. Otherwise, one of the + * push_leaf functions must have already + * dirtied this buffer + */ + if (path->nodes[0] == leaf) + btrfs_mark_buffer_dirty(leaf); free_extent_buffer(leaf); } } else { @@ -2683,60 +2938,40 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, } /* - * walk up the tree as far as required to find the previous leaf. + * search the tree again to find a leaf with lesser keys * returns 0 if it found something or 1 if there are no lesser leaves. * returns < 0 on io errors. 
*/ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) { - u64 bytenr; - int slot; - int level = 1; - struct extent_buffer *c; - struct extent_buffer *next = NULL; - - while(level < BTRFS_MAX_LEVEL) { - if (!path->nodes[level]) - return 1; + struct btrfs_key key; + struct btrfs_disk_key found_key; + int ret; - slot = path->slots[level]; - c = path->nodes[level]; - if (slot == 0) { - level++; - if (level == BTRFS_MAX_LEVEL) - return 1; - continue; - } - slot--; + btrfs_item_key_to_cpu(path->nodes[0], &key, 0); - bytenr = btrfs_node_blockptr(c, slot); - if (next) - free_extent_buffer(next); + if (key.offset > 0) + key.offset--; + else if (key.type > 0) + key.type--; + else if (key.objectid > 0) + key.objectid--; + else + return 1; - next = read_tree_block(root, bytenr, - btrfs_level_size(root, level - 1)); - break; - } - path->slots[level] = slot; - while(1) { - level--; - c = path->nodes[level]; - free_extent_buffer(c); - slot = btrfs_header_nritems(next); - if (slot != 0) - slot--; - path->nodes[level] = next; - path->slots[level] = slot; - if (!level) - break; - next = read_tree_block(root, btrfs_node_blockptr(next, slot), - btrfs_level_size(root, level - 1)); - } - return 0; + btrfs_release_path(root, path); + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + return ret; + btrfs_item_key(path->nodes[0], &found_key, 0); + ret = comp_keys(&found_key, &key); + if (ret < 0) + return 0; + return 1; } /* - * walk up the tree as far as required to find the next leaf. + * search the tree again to find a leaf with greater keys * returns 0 if it found something or 1 if there are no greater leaves. * returns < 0 on io errors. */ @@ -2744,9 +2979,38 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) { int slot; int level = 1; - u64 bytenr; struct extent_buffer *c; struct extent_buffer *next = NULL; + struct btrfs_key key; + u32 nritems; + int ret; + + nritems = btrfs_header_nritems(path->nodes[0]); + if (nritems == 0) { + return 1; + } + + btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1); + + btrfs_release_path(root, path); + path->keep_locks = 1; + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + path->keep_locks = 0; + + if (ret < 0) + return ret; + + nritems = btrfs_header_nritems(path->nodes[0]); + /* + * by releasing the path above we dropped all our locks. A balance + * could have added more items next to the key that used to be + * at the very end of the block. So, check again here and + * advance the path if there are now more items available. 
+ */ + if (nritems > 0 && path->slots[0] < nritems - 1) { + path->slots[0]++; + goto done; + } while(level < BTRFS_MAX_LEVEL) { if (!path->nodes[level]) @@ -2756,36 +3020,50 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) c = path->nodes[level]; if (slot >= btrfs_header_nritems(c)) { level++; - if (level == BTRFS_MAX_LEVEL) + if (level == BTRFS_MAX_LEVEL) { return 1; + } continue; } - bytenr = btrfs_node_blockptr(c, slot); - if (next) + if (next) { + btrfs_tree_unlock(next); free_extent_buffer(next); + } - if (path->reada) + if (level == 1 && path->locks[1] && path->reada) reada_for_search(root, path, level, slot, 0); - next = read_tree_block(root, bytenr, - btrfs_level_size(root, level -1)); + next = read_node_slot(root, c, slot); + if (!path->skip_locking) { + WARN_ON(!btrfs_tree_locked(c)); + btrfs_tree_lock(next); + } break; } path->slots[level] = slot; while(1) { level--; c = path->nodes[level]; + if (path->locks[level]) + btrfs_tree_unlock(c); free_extent_buffer(c); path->nodes[level] = next; path->slots[level] = 0; + if (!path->skip_locking) + path->locks[level] = 1; if (!level) break; - if (path->reada) - reada_for_search(root, path, level, 0, 0); - next = read_tree_block(root, btrfs_node_blockptr(next, 0), - btrfs_level_size(root, level - 1)); + if (level == 1 && path->locks[1] && path->reada) + reada_for_search(root, path, level, slot, 0); + next = read_node_slot(root, next, 0); + if (!path->skip_locking) { + WARN_ON(!btrfs_tree_locked(path->nodes[level])); + btrfs_tree_lock(next); + } } +done: + unlock_up(path, 0, 1); return 0; }
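
Note on the locking pattern this patch introduces: btrfs_lock_root_node() takes the blocking tree lock on whatever node is currently the root, then re-checks under root->node_lock that the root pointer did not change while the caller slept on the lock, and retries if it did. Below is a minimal standalone C sketch of that lock-then-revalidate loop, using plain pthread mutexes in place of the btrfs tree lock and spinlock; the tree/node types and function names are invented for illustration and are not part of the btrfs API.

/*
 * Illustrative sketch only (not btrfs code): the "lock, then re-check,
 * else retry" pattern used by btrfs_lock_root_node() in the patch above.
 * pthread mutexes stand in for the btrfs tree lock and root->node_lock,
 * and the type/function names here are made up for the example.
 */
#include <pthread.h>

struct node {
	pthread_mutex_t lock;		/* stands in for the blocking tree lock */
};

struct tree {
	pthread_mutex_t node_lock;	/* stands in for root->node_lock */
	struct node *root;		/* may be swapped by a concurrent COW */
};

static struct node *lock_tree_root(struct tree *t)
{
	struct node *n;

	while (1) {
		/* snapshot the current root under the short lock */
		pthread_mutex_lock(&t->node_lock);
		n = t->root;
		pthread_mutex_unlock(&t->node_lock);

		/* take the per-node lock; the caller may sleep here */
		pthread_mutex_lock(&n->lock);

		/* re-check: did the root change while we waited? */
		pthread_mutex_lock(&t->node_lock);
		if (n == t->root) {
			pthread_mutex_unlock(&t->node_lock);
			return n;	/* still the root, keep it locked */
		}
		pthread_mutex_unlock(&t->node_lock);

		/* root moved under us: drop the lock and try again */
		pthread_mutex_unlock(&n->lock);
	}
}

In the real function the snapshot step also takes a reference on the buffer (via extent_buffer_get() in btrfs_root_node()) so the old root cannot be freed while the caller waits for its lock; the sketch leaves reference counting out to keep the retry pattern visible.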