return 0;
}
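+/*
+ * returns non-zero when the async submit or async bio backlog climbs
+ * past roughly 256 items per open device.  iodone callers tolerate a
+ * 50% larger backlog, so the workers draining the queues see them as
+ * uncongested sooner than btrfs_congested_fn() does.
+ */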
+int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
+{
+ int limit = 256 * info->fs_devices->open_devices;
+
+ if (iodone)
+ limit = (limit * 3) / 2;
+ if (atomic_read(&info->nr_async_submits) > limit)
+ return 1;
+
+ return atomic_read(&info->nr_async_bios) > limit;
+}
+
static void run_one_async_submit(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info;
struct async_submit_bio *async;

async = container_of(work, struct async_submit_bio, work);
fs_info = BTRFS_I(async->inode)->root->fs_info;
atomic_dec(&fs_info->nr_async_submits);
+
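+ /* this work item is coming off the async queue; if the remaining
+ * backlog is below the iodone limit, clear write congestion on the
+ * bdi so throttled writers can make progress again
+ */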
+ if ((async->bio->bi_rw & (1 << BIO_RW)) &&
+ !btrfs_congested_async(fs_info, 1)) {
+ clear_bdi_congested(&fs_info->bdi, WRITE);
+ }
async->submit_bio_hook(async->inode, async->rw, async->bio,
async->mirror_num);
kfree(async);
}

static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
int ret = 0;
- int limit = 256 * info->fs_devices->open_devices;
struct list_head *cur;
struct btrfs_device *device;
struct backing_dev_info *bdi;
if ((bdi_bits & (1 << BDI_write_congested)) &&
- atomic_read(&info->nr_async_submits) > limit) {
+ btrfs_congested_async(info, 0))
return 1;
- }
list_for_each(cur, &info->fs_devices->devices) {
device = list_entry(cur, struct btrfs_device, dev_list);
INIT_LIST_HEAD(&fs_info->space_info);
btrfs_mapping_init(&fs_info->mapping_tree);
atomic_set(&fs_info->nr_async_submits, 0);
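+ /* async bios get their own counter, separate from async submits */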
+ atomic_set(&fs_info->nr_async_bios, 0);
atomic_set(&fs_info->throttles, 0);
atomic_set(&fs_info->throttle_gen, 0);
fs_info->sb = sb;
* cannot dynamically grow.
*/
btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
- btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
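+ /* cap the submit worker pool at one thread per device */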
+ btrfs_init_workers(&fs_info->submit_workers,
+ min_t(u64, fs_devices->num_devices,
+ fs_info->thread_pool_size));
/* a higher idle thresh on the submit workers makes it much more
* likely that bios will be sent down in a sane order to the
free_extent_buffer(root->fs_info->dev_root->node);
btrfs_free_block_groups(root->fs_info);
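+ /* presumably flags the later phase of close for code that checks
+ * fs_info->closing; the readers of this flag are outside this hunk
+ */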
+ fs_info->closing = 2;
del_fs_roots(fs_info);
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
struct extent_io_tree *tree;
u64 num_dirty;
u64 start = 0;
- unsigned long thresh = 16 * 1024 * 1024;
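+ /* drop the dirty threshold from 16MB to 2MB so btree writeback
+ * starts sooner (presumably to keep the async backlog in check)
+ */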
+ unsigned long thresh = 2 * 1024 * 1024;
tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
if (current_is_pdflush())