Btrfs: Transaction commit: don't use filemap_fdatawait
[deliverable/linux.git] fs/btrfs/disk-io.c
index 56c54a41dbbb3bd917463c94db87c73f5463b8c1..9601b13c7d7a9486c814f176104520090610d425 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -429,6 +429,18 @@ int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
        return 0;
 }
 
+int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
+{
+       int limit = 256 * info->fs_devices->open_devices;
+
+       if (iodone)
+               limit = (limit * 3) / 2;
+       if (atomic_read(&info->nr_async_submits) > limit)
+               return 1;
+
+       return atomic_read(&info->nr_async_bios) > limit;
+}
+
 static void run_one_async_submit(struct btrfs_work *work)
 {
        struct btrfs_fs_info *fs_info;
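
The new helper centralizes the write-throttling test: the backlog limit scales at 256 pending async requests per open device, and callers on the completion path (iodone != 0) use a limit 50% higher, so the bdi can be un-congested while the queue is still draining. A minimal user-space sketch of that arithmetic follows; the device and backlog counts are purely illustrative and only a single counter is modeled.

	#include <stdio.h>

	/* Mirrors the threshold math in btrfs_congested_async(); the
	 * numbers fed in below are made up for illustration. */
	static int congested(int open_devices, int pending, int iodone)
	{
		int limit = 256 * open_devices;

		if (iodone)	/* completion path tolerates 50% more backlog */
			limit = (limit * 3) / 2;
		return pending > limit;
	}

	int main(void)
	{
		/* 4 open devices: the submit side congests above 1024
		 * pending requests, the completion side only above 1536. */
		printf("submit path: %d\n", congested(4, 1100, 0));
		printf("iodone path: %d\n", congested(4, 1100, 1));
		return 0;
	}
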
@@ -437,6 +449,11 @@ static void run_one_async_submit(struct btrfs_work *work)
        async = container_of(work, struct  async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;
        atomic_dec(&fs_info->nr_async_submits);
+
+       if ((async->bio->bi_rw & (1 << BIO_RW)) &&
+           !btrfs_congested_async(fs_info, 1)) {
+               clear_bdi_congested(&fs_info->bdi, WRITE);
+       }
        async->submit_bio_hook(async->inode, async->rw, async->bio,
                               async->mirror_num);
        kfree(async);
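
run_one_async_submit() now pairs the submit-counter decrement with congestion relief: after a queued write bio's worker runs, the filesystem-wide bdi is marked uncongested for writes as soon as the async backlog is back under the completion-side limit. A rough user-space analogue of that decrement-then-clear pattern, with hypothetical names and C11 atomics standing in for the kernel's atomic_t and bdi flag:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int nr_async_submits;
	static atomic_bool write_congested;

	#define CLEAR_LIMIT 1536	/* e.g. 4 devices * 256 * 3 / 2 */

	/* One async write has been handed to its worker: drop the shared
	 * counter and lift the congestion flag once the backlog is small
	 * enough again. */
	static void complete_one_write(void)
	{
		atomic_fetch_sub(&nr_async_submits, 1);
		if (atomic_load(&nr_async_submits) <= CLEAR_LIMIT)
			atomic_store(&write_congested, false);
	}

	int main(void)
	{
		atomic_store(&nr_async_submits, 1537);
		atomic_store(&write_congested, true);
		complete_one_write();
		printf("still congested: %d\n", atomic_load(&write_congested));
		return 0;
	}
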
@@ -938,15 +955,13 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
-       int limit = 256 * info->fs_devices->open_devices;
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;
 
        if ((bdi_bits & (1 << BDI_write_congested)) &&
-           atomic_read(&info->nr_async_submits) > limit) {
+           btrfs_congested_async(info, 0))
                return 1;
-       }
 
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
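
btrfs_congested_fn() keeps its two-stage shape: the shared async backlog is checked first through the new helper, and only if that passes is each device's own backing_dev_info polled. The sketch below mirrors that shape with standalone, made-up types; it is not the kernel interface.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_device {
		bool write_congested;	/* stands in for the per-device bdi bit */
	};

	static bool backlog_congested(int pending, int ndevs)
	{
		return pending > 256 * ndevs;
	}

	static bool fs_write_congested(int pending, struct fake_device *devs, int ndevs)
	{
		if (backlog_congested(pending, ndevs))
			return true;
		for (int i = 0; i < ndevs; i++)	/* per-device poll */
			if (devs[i].write_congested)
				return true;
		return false;
	}

	int main(void)
	{
		struct fake_device devs[2] = { { false }, { true } };

		printf("%d\n", fs_write_congested(100, devs, 2));
		return 0;
	}
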
@@ -1250,6 +1265,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
        atomic_set(&fs_info->nr_async_submits, 0);
+       atomic_set(&fs_info->nr_async_bios, 0);
        atomic_set(&fs_info->throttles, 0);
        atomic_set(&fs_info->throttle_gen, 0);
        fs_info->sb = sb;
@@ -1348,7 +1364,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         * cannot dynamically grow.
         */
        btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
-       btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
+       btrfs_init_workers(&fs_info->submit_workers,
+                          min_t(u64, fs_devices->num_devices,
+                          fs_info->thread_pool_size));
 
        /* a higher idle thresh on the submit workers makes it much more
         * likely that bios will be send down in a sane order to the
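
The submit pool is now clamped to the device count, presumably because bio-submission threads beyond one per device mostly just reorder the stream each disk sees. A tiny sketch of the clamp with invented counts:

	#include <stdio.h>

	static unsigned long submit_pool_size(unsigned long num_devices,
					      unsigned long thread_pool_size)
	{
		/* same effect as min_t(u64, num_devices, thread_pool_size) */
		return num_devices < thread_pool_size ? num_devices : thread_pool_size;
	}

	int main(void)
	{
		/* e.g. 2 disks with the default pool of 8 -> 2 submit workers */
		printf("%lu\n", submit_pool_size(2, 8));
		return 0;
	}
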
@@ -1823,7 +1841,7 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
        struct extent_io_tree *tree;
        u64 num_dirty;
        u64 start = 0;
-       unsigned long thresh = 16 * 1024 * 1024;
+       unsigned long thresh = 2 * 1024 * 1024;
        tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 
        if (current_is_pdflush())
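
btrfs_btree_balance_dirty() now reacts to 2MB of dirty btree pages instead of 16MB, so metadata writeback starts earlier and in smaller batches, presumably so a transaction commit is less likely to find a large backlog it would otherwise have to wait out. A trivial illustration of the check, with an invented dirty-byte count:

	#include <stdio.h>

	int main(void)
	{
		unsigned long thresh = 2 * 1024 * 1024;		/* new threshold */
		unsigned long num_dirty = 3 * 1024 * 1024;	/* hypothetical */

		if (num_dirty > thresh)
			printf("kick writeback for %lu dirty bytes\n", num_dirty);
		return 0;
	}
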