diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 6d5c5f73ad6432b8755bb419f66ad3c06780c98e..0236de711989097bbf5191dbb6871281d840de22 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -24,6 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/percpu_counter.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -3357,6 +3358,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        struct btrfs_space_info *found;
        int i;
        int factor;
+       int ret;
 
        if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
                     BTRFS_BLOCK_GROUP_RAID10))
@@ -3380,6 +3382,12 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
        if (!found)
                return -ENOMEM;
 
+       ret = percpu_counter_init(&found->total_bytes_pinned, 0);
+       if (ret) {
+               kfree(found);
+               return ret;
+       }
+
        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                INIT_LIST_HEAD(&found->block_groups[i]);
        init_rwsem(&found->groups_sem);
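
The hunks above give every btrfs_space_info a struct percpu_counter total_bytes_pinned (initialized here, destroyed in btrfs_free_block_groups() further down), so hot paths can account possibly-pinned bytes by bumping a per-cpu shard, while percpu_counter_compare() only pays for a full sum when the rough global count is too close to the threshold to decide. The sketch below is a userspace analogue of that pattern, not the kernel implementation; every name in it is invented.

/*
 * Userspace analogue (invented names, not kernel code) of the percpu
 * counter pattern adopted above: updates touch only one shard, so they
 * are cheap on hot paths, while readers pay for a full sum only when a
 * threshold decision has to be made -- the trade-off total_bytes_pinned
 * makes for the ENOSPC heuristics in the hunks that follow.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_SHARDS 8

struct sharded_counter {
	/* Pad each shard onto its own cache line to avoid false sharing. */
	struct { int64_t v; char pad[56]; } shard[NR_SHARDS];
};

/* Cheap path: bump one shard, analogous to percpu_counter_add(). */
static void counter_add(struct sharded_counter *c, unsigned int cpu, int64_t n)
{
	__atomic_add_fetch(&c->shard[cpu % NR_SHARDS].v, n, __ATOMIC_RELAXED);
}

/* Slow path: sum every shard, analogous to percpu_counter_sum(). */
static int64_t counter_sum(const struct sharded_counter *c)
{
	int64_t sum = 0;

	for (int i = 0; i < NR_SHARDS; i++)
		sum += __atomic_load_n(&c->shard[i].v, __ATOMIC_RELAXED);
	return sum;
}

/* Returns <0, 0 or >0, mirroring percpu_counter_compare() semantics. */
static int counter_compare(const struct sharded_counter *c, int64_t rhs)
{
	int64_t sum = counter_sum(c);

	return (sum > rhs) - (sum < rhs);
}

int main(void)
{
	struct sharded_counter pinned = { 0 };

	counter_add(&pinned, 0, 1 << 20);	/* "pin" 1 MiB from CPU 0 */
	counter_add(&pinned, 3, 4 << 20);	/* "pin" 4 MiB from CPU 3 */

	if (counter_compare(&pinned, 2 << 20) >= 0)
		printf("possibly pinned: %lld bytes\n",
		       (long long)counter_sum(&pinned));
	return 0;
}
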
@@ -3612,10 +3620,11 @@ alloc:
                }
 
                /*
-                * If we have less pinned bytes than we want to allocate then
-                * don't bother committing the transaction, it won't help us.
+                * If we don't have enough pinned space to deal with this
+                * allocation don't bother committing the transaction.
                 */
-               if (data_sinfo->bytes_pinned < bytes)
+               if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
+                                          bytes) < 0)
                        committed = 1;
                spin_unlock(&data_sinfo->lock);
 
@@ -3624,6 +3633,7 @@ commit_trans:
                if (!committed &&
                    !atomic_read(&root->fs_info->open_ioctl_trans)) {
                        committed = 1;
+
                        trans = btrfs_join_transaction(root);
                        if (IS_ERR(trans))
                                return PTR_ERR(trans);
@@ -3656,6 +3666,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
 
        data_sinfo = root->fs_info->data_sinfo;
        spin_lock(&data_sinfo->lock);
+       WARN_ON(data_sinfo->bytes_may_use < bytes);
        data_sinfo->bytes_may_use -= bytes;
        trace_btrfs_space_reservation(root->fs_info, "space_info",
                                      data_sinfo->flags, bytes, 0);
@@ -3933,12 +3944,11 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
                                         unsigned long nr_pages)
 {
        struct super_block *sb = root->fs_info->sb;
-       int started;
 
-       /* If we can not start writeback, just sync all the delalloc file. */
-       started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
-                                                     WB_REASON_FS_FREE_SPACE);
-       if (!started) {
+       if (down_read_trylock(&sb->s_umount)) {
+               writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
+               up_read(&sb->s_umount);
+       } else {
                /*
                 * We needn't worry the filesystem going from r/w to r/o though
                 * we don't acquire ->s_umount mutex, because the filesystem
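
btrfs_writeback_inodes_sb_nr() above no longer keys off the return value of try_to_writeback_inodes_sb_nr(); it takes the s_umount read lock itself with a trylock and only falls back to the manual delalloc walk when the lock cannot be taken. A minimal sketch of that trylock-else-fallback shape, with a pthread rwlock standing in for s_umount and every other name invented:

/* Trylock-else-fallback sketch; pthread stand-ins, invented names. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER;

static void start_writeback(unsigned long nr_pages)
{
	printf("kicking writeback for %lu pages\n", nr_pages);
}

static void sync_delalloc_manually(void)
{
	printf("walking delalloc inodes by hand\n");
}

static void kick_writeback(unsigned long nr_pages)
{
	if (pthread_rwlock_tryrdlock(&s_umount) == 0) {
		/* Lock acquired: regular writeback is safe. */
		start_writeback(nr_pages);
		pthread_rwlock_unlock(&s_umount);
	} else {
		/* Lock contended (e.g. an unmount in flight): fall back. */
		sync_delalloc_manually();
	}
}

int main(void)
{
	kick_writeback(128);
	return 0;
}
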
@@ -4044,7 +4054,8 @@ static int may_commit_transaction(struct btrfs_root *root,
 
        /* See if there is enough pinned space to make this reservation */
        spin_lock(&space_info->lock);
-       if (space_info->bytes_pinned >= bytes) {
+       if (percpu_counter_compare(&space_info->total_bytes_pinned,
+                                  bytes) >= 0) {
                spin_unlock(&space_info->lock);
                goto commit;
        }
@@ -4059,7 +4070,8 @@ static int may_commit_transaction(struct btrfs_root *root,
 
        spin_lock(&space_info->lock);
        spin_lock(&delayed_rsv->lock);
-       if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
+       if (percpu_counter_compare(&space_info->total_bytes_pinned,
+                                  bytes - delayed_rsv->size) >= 0) {
                spin_unlock(&delayed_rsv->lock);
                spin_unlock(&space_info->lock);
                return -ENOSPC;
@@ -5397,6 +5409,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        struct btrfs_caching_control *next;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_block_group_cache *cache;
+       struct btrfs_space_info *space_info;
 
        down_write(&fs_info->extent_commit_sem);
 
@@ -5419,6 +5432,9 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 
        up_write(&fs_info->extent_commit_sem);
 
+       list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
+               percpu_counter_set(&space_info->total_bytes_pinned, 0);
+
        update_global_block_rsv(fs_info);
 }
 
@@ -5516,6 +5532,27 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
+                            u64 owner, u64 root_objectid)
+{
+       struct btrfs_space_info *space_info;
+       u64 flags;
+
+       if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+               if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
+                       flags = BTRFS_BLOCK_GROUP_SYSTEM;
+               else
+                       flags = BTRFS_BLOCK_GROUP_METADATA;
+       } else {
+               flags = BTRFS_BLOCK_GROUP_DATA;
+       }
+
+       space_info = __find_space_info(fs_info, flags);
+       BUG_ON(!space_info); /* Logic bug */
+       percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
+}
+
+
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
@@ -5736,6 +5773,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                goto out;
                        }
                }
+               add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
+                                root_objectid);
        } else {
                if (found_extent) {
                        BUG_ON(is_data && refs_to_drop !=
@@ -5859,6 +5898,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           u64 parent, int last_ref)
 {
        struct btrfs_block_group_cache *cache = NULL;
+       int pin = 1;
        int ret;
 
        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
@@ -5891,8 +5931,14 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
                btrfs_add_free_space(cache, buf->start, buf->len);
                btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
+               pin = 0;
        }
 out:
+       if (pin)
+               add_pinned_bytes(root->fs_info, buf->len,
+                                btrfs_header_level(buf),
+                                root->root_key.objectid);
+
        /*
         * Deleting the buffer, clear the corrupt flag since it doesn't matter
         * anymore.
@@ -5909,6 +5955,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        int ret;
        struct btrfs_fs_info *fs_info = root->fs_info;
 
+       add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
+
        /*
         * tree log blocks never actually go into the extent allocation
         * tree, just update pinning info and exit early.
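
As the hunks above read, add_pinned_bytes() keeps total_bytes_pinned a deliberate over-estimate: btrfs_free_extent() adds the bytes up front for every queued free, btrfs_free_tree_block() adds them when the buffer ends up pinned rather than returned to the free-space cache, __btrfs_free_extent() subtracts them again when the extent turns out to still be referenced, and btrfs_prepare_extent_commit() zeroes every space_info counter at commit time, when the pinned space is about to be reclaimed for real. A toy single-threaded model of that protocol (userspace, invented names; the real counter is per-cpu and only approximately summed):

#include <stdint.h>
#include <stdio.h>

static int64_t total_bytes_pinned;	/* stands in for the percpu counter */

/* btrfs_free_extent()-style optimistic add when a free is queued. */
static void queue_free(uint64_t bytes)       { total_bytes_pinned += bytes; }

/* __btrfs_free_extent()-style back-out when references remain. */
static void still_referenced(uint64_t bytes) { total_bytes_pinned -= bytes; }

/* btrfs_prepare_extent_commit()-style reset at commit time. */
static void commit_unpinned(void)            { total_bytes_pinned = 0; }

int main(void)
{
	queue_free(16384);
	queue_free(8192);
	still_referenced(8192);

	printf("possibly pinned before commit: %lld\n",
	       (long long)total_bytes_pinned);	/* prints 16384 */

	commit_unpinned();
	printf("after commit: %lld\n", (long long)total_bytes_pinned);
	return 0;
}
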
@@ -7902,6 +7950,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
        struct btrfs_space_info *space_info;
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_device *device;
+       struct btrfs_trans_handle *trans;
        u64 min_free;
        u64 dev_min = 1;
        u64 dev_nr = 0;
@@ -7988,6 +8037,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                do_div(min_free, dev_min);
        }
 
+       /* We need to do this so that we can look at pending chunks */
+       trans = btrfs_join_transaction(root);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out;
+       }
+
        mutex_lock(&root->fs_info->chunk_mutex);
        list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
                u64 dev_offset;
@@ -7998,7 +8054,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                 */
                if (device->total_bytes > device->bytes_used + min_free &&
                    !device->is_tgtdev_for_dev_replace) {
-                       ret = find_free_dev_extent(device, min_free,
+                       ret = find_free_dev_extent(trans, device, min_free,
                                                   &dev_offset, NULL);
                        if (!ret)
                                dev_nr++;
@@ -8010,6 +8066,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                }
        }
        mutex_unlock(&root->fs_info->chunk_mutex);
+       btrfs_end_transaction(trans, root);
 out:
        btrfs_put_block_group(block_group);
        return ret;
@@ -8152,6 +8209,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                                dump_space_info(space_info, 0, 0);
                        }
                }
+               percpu_counter_destroy(&space_info->total_bytes_pinned);
                list_del(&space_info->list);
                kfree(space_info);
        }
@@ -8374,6 +8432,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
                                        sizeof(item));
                if (ret)
                        btrfs_abort_transaction(trans, extent_root, ret);
+               ret = btrfs_finish_chunk_alloc(trans, extent_root,
+                                              key.objectid, key.offset);
+               if (ret)
+                       btrfs_abort_transaction(trans, extent_root, ret);
        }
 }
 