Btrfs: fix race between block group relocation and nocow writes
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 90e70e21e47957eb7a673d500fc7353ba951beef..7ae758685c7b8be19d3c96fcc5bbaaf2133553d9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1419,6 +1419,16 @@ struct btrfs_block_group_cache {
         */
        atomic_t reservations;
 
+       /*
+        * Incremented while holding the spinlock *lock* by a task checking if
+        * it can perform a nocow write (only incremented if the *ro* field is
+        * 0). Decremented by such tasks once they create an ordered extent,
+        * or earlier if an error occurs before reaching that step. This is to
+        * prevent races between block group relocation and nocow writes
+        * through direct IO.
+        */
+       atomic_t nocow_writers;
+
        /* Lock for free space tree operations. */
        struct mutex free_space_lock;
 
@@ -3513,6 +3523,9 @@ int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
                                         const u64 start);
 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
+bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
+void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count);
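
The hunks above only add the counter and the helper declarations; the helpers themselves are defined elsewhere in the patch. As a rough illustration of the scheme described in the field's comment, below is a minimal, self-contained userspace C model. It is a sketch under stated assumptions, not the btrfs implementation: the names struct block_group, inc_nocow_writers, dec_nocow_writers and relocate_block_group are invented for the example, and a pthread mutex, condition variable and C11 atomics stand in for the kernel primitives (the block group's spinlock, atomic_t, and whatever wait mechanism btrfs_wait_nocow_writers uses).

/*
 * Simplified model, not btrfs code: a "writer" may enter the nocow path
 * only while the group is not read-only, and "relocation" may proceed
 * only once no such writer is still in flight.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct block_group {
	pthread_mutex_t lock;     /* stands in for the block group spinlock */
	pthread_cond_t drained;   /* wakes a waiting relocation task */
	bool ro;                  /* stands in for the *ro* field */
	atomic_int nocow_writers; /* stands in for the new counter */
};

/* Writer side: try to enter the nocow path (cf. btrfs_inc_nocow_writers). */
static bool inc_nocow_writers(struct block_group *bg)
{
	bool ok = false;

	pthread_mutex_lock(&bg->lock);
	if (!bg->ro) {
		atomic_fetch_add(&bg->nocow_writers, 1);
		ok = true;
	}
	pthread_mutex_unlock(&bg->lock);
	return ok;
}

/* Writer side: the ordered extent was created, or an error happened first. */
static void dec_nocow_writers(struct block_group *bg)
{
	if (atomic_fetch_sub(&bg->nocow_writers, 1) == 1) {
		/* Last in-flight writer: wake any waiting relocation task. */
		pthread_mutex_lock(&bg->lock);
		pthread_cond_broadcast(&bg->drained);
		pthread_mutex_unlock(&bg->lock);
	}
}

/* Relocation side: mark read-only, then wait out in-flight nocow writers. */
static void relocate_block_group(struct block_group *bg)
{
	pthread_mutex_lock(&bg->lock);
	bg->ro = true;
	while (atomic_load(&bg->nocow_writers) > 0)
		pthread_cond_wait(&bg->drained, &bg->lock);
	pthread_mutex_unlock(&bg->lock);
	/* No nocow write can start now, and none is still pending. */
}

int main(void)
{
	struct block_group bg = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.drained = PTHREAD_COND_INITIALIZER,
		.ro = false,
	};

	if (inc_nocow_writers(&bg)) {
		/* ...perform the nocow write, create the ordered extent... */
		dec_nocow_writers(&bg);
	}
	relocate_block_group(&bg);
	printf("ro=%d nocow_writers=%d\n", bg.ro,
	       atomic_load(&bg.nocow_writers));
	return 0;
}

The property the model demonstrates is the one the patch relies on: the read-only check and the counter increment happen under the same lock, so once a relocation task sets ro and waits for the counter to drain, no new nocow write can start and none can still be in flight. The model builds with something like: cc -pthread -o nocow_model nocow_model.c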