btrfs: make static code static & remove dead code
fs/btrfs/ctree.c
index 566d99b51befe6cb9cacc6c9521bbb6f577b586e..de6de8e60b46019f528b93505d3848adb8699191 100644
@@ -41,12 +41,7 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
                    int level, int slot);
 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
                                 struct extent_buffer *eb);
-struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
-                                         u32 blocksize, u64 parent_transid,
-                                         u64 time_seq);
-struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
-                                               u64 bytenr, u32 blocksize,
-                                               u64 time_seq);
+static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -208,7 +203,7 @@ struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
  * tree until you end up with a lock on the root.  A locked buffer
  * is returned, with a reference held.
  */
-struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
+static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
 {
        struct extent_buffer *eb;
 
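As a usage sketch only, not part of this patch: per the comment above, the buffer comes back read-locked with a reference held, so a caller inside ctree.c (the function is static after this change) is expected to release both, roughly along these lines, assuming the usual btrfs_tree_read_unlock()/free_extent_buffer() helpers:

        struct extent_buffer *eb;

        eb = btrfs_read_lock_root_node(root);   /* root eb, read-locked, ref held */
        /* ... read-only work against the root node ... */
        btrfs_tree_read_unlock(eb);             /* drop the read lock */
        free_extent_buffer(eb);                 /* drop the reference */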
@@ -360,6 +355,44 @@ static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
        write_unlock(&fs_info->tree_mod_log_lock);
 }
 
+/*
+ * Increment the upper half of tree_mod_seq and reset the lower half to zero.
+ *
+ * Must be called with fs_info->tree_mod_seq_lock held.
+ */
+static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
+{
+       u64 seq = atomic64_read(&fs_info->tree_mod_seq);
+       seq &= 0xffffffff00000000ull;
+       seq += 1ull << 32;
+       atomic64_set(&fs_info->tree_mod_seq, seq);
+       return seq;
+}
+
+/*
+ * Increment the lower half of tree_mod_seq.
+ *
+ * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
+ * are generated should not technically require the spin lock here. (Rationale:
+ * a minor increment that lands while a major increment is between its
+ * atomic64_read and atomic64_set calls does not duplicate sequence numbers;
+ * it just returns a unique sequence number as usual.) We have decided to keep
+ * that requirement for now and to rethink it once we notice it actually
+ * causing a problem for some workload.
+ */
+static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
+{
+       return atomic64_inc_return(&fs_info->tree_mod_seq);
+}
+
+/*
+ * return the last minor in the previous major tree_mod_seq number
+ */
+u64 btrfs_tree_mod_seq_prev(u64 seq)
+{
+       return (seq & 0xffffffff00000000ull) - 1ull;
+}
+
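For orientation (values invented for illustration), tree_mod_seq packs a major number in the upper 32 bits and a minor number in the lower 32 bits, so the three helpers above behave like this:

        seq                                        0x0000000200000013   /* major 2, minor 0x13 */
        btrfs_inc_tree_mod_seq_minor(fs_info)  ->  0x0000000200000014   /* minor bumped */
        btrfs_inc_tree_mod_seq_major(fs_info)  ->  0x0000000300000000   /* major bumped, minor reset */
        btrfs_tree_mod_seq_prev(0x0000000300000000)
                                                == 0x00000002ffffffff   /* last minor under major 2 */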
 /*
  * This adds a new blocker to the tree mod log's blocker list if the @elem
  * passed does not already have a sequence number set. So when a caller expects
@@ -376,10 +409,10 @@ u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
        tree_mod_log_write_lock(fs_info);
        spin_lock(&fs_info->tree_mod_seq_lock);
        if (!elem->seq) {
-               elem->seq = btrfs_inc_tree_mod_seq(fs_info);
+               elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
                list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
        }
-       seq = btrfs_inc_tree_mod_seq(fs_info);
+       seq = btrfs_inc_tree_mod_seq_minor(fs_info);
        spin_unlock(&fs_info->tree_mod_seq_lock);
        tree_mod_log_write_unlock(fs_info);
 
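A hedged usage sketch, not taken from this diff: assuming the struct seq_list element and the btrfs_put_tree_mod_seq() counterpart declared in ctree.h, a caller that needs tree mod log rewinds to stay valid would bracket its work roughly like this:

        struct seq_list elem = {};                  /* elem.seq == 0, so a blocker gets added */

        btrfs_get_tree_mod_seq(fs_info, &elem);     /* assigns elem.seq a fresh major seq */
        /* ... resolve backrefs / rewind tree blocks at this sequence point ... */
        btrfs_put_tree_mod_seq(fs_info, &elem);     /* drop the blocker again */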
@@ -524,7 +557,10 @@ static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
        if (!tm)
                return -ENOMEM;
 
-       tm->seq = btrfs_inc_tree_mod_seq(fs_info);
+       spin_lock(&fs_info->tree_mod_seq_lock);
+       tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
+       spin_unlock(&fs_info->tree_mod_seq_lock);
+
        return tm->seq;
 }
 
@@ -1281,7 +1317,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                free_extent_buffer(eb_root);
                blocksize = btrfs_level_size(root, old_root->level);
                old = read_tree_block(root, logical, blocksize, 0);
-               if (!old) {
+               if (!old || !extent_buffer_uptodate(old)) {
+                       free_extent_buffer(old);
                        pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
                                logical);
                        WARN_ON(1);
@@ -1526,8 +1563,10 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                        if (!cur) {
                                cur = read_tree_block(root, blocknr,
                                                         blocksize, gen);
-                               if (!cur)
+                               if (!cur || !extent_buffer_uptodate(cur)) {
+                                       free_extent_buffer(cur);
                                        return -EIO;
+                               }
                        } else if (!uptodate) {
                                err = btrfs_read_buffer(cur, gen);
                                if (err) {
@@ -1692,6 +1731,8 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
                                   struct extent_buffer *parent, int slot)
 {
        int level = btrfs_header_level(parent);
+       struct extent_buffer *eb;
+
        if (slot < 0)
                return NULL;
        if (slot >= btrfs_header_nritems(parent))
@@ -1699,9 +1740,15 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
 
        BUG_ON(level == 0);
 
-       return read_tree_block(root, btrfs_node_blockptr(parent, slot),
-                      btrfs_level_size(root, level - 1),
-                      btrfs_node_ptr_generation(parent, slot));
+       eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
+                            btrfs_level_size(root, level - 1),
+                            btrfs_node_ptr_generation(parent, slot));
+       if (eb && !extent_buffer_uptodate(eb)) {
+               free_extent_buffer(eb);
+               eb = NULL;
+       }
+
+       return eb;
 }
 
 /*
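The last three hunks repeat one pattern: read_tree_block() may hand back a buffer that never became uptodate, so each caller now checks extent_buffer_uptodate() and drops the buffer before treating the read as an error. A hypothetical local helper (the name read_tree_block_checked is invented here for illustration and is not part of the patch) would capture the idiom:

static struct extent_buffer *read_tree_block_checked(struct btrfs_root *root,
                                                     u64 bytenr, u32 blocksize,
                                                     u64 parent_transid)
{
        struct extent_buffer *eb;

        eb = read_tree_block(root, bytenr, blocksize, parent_transid);
        if (eb && !extent_buffer_uptodate(eb)) {
                /* buffer allocated but its contents never validated */
                free_extent_buffer(eb);
                eb = NULL;
        }
        return eb;      /* NULL: caller treats this as a read error (-EIO) */
}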