md/raid5: be more selective about distributing flags across batch.
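For context on the change below: breaking up a stripe batch used to copy
almost the whole of the head stripe's ->state onto every batch member,
masking out only a handful of bits (STRIPE_ACTIVE, STRIPE_PREREAD_ACTIVE,
plus STRIPE_DEGRADED in one of the two call sites, and the expand/sync
flags).  With this change each member keeps only its own
STRIPE_EXPAND_SYNC_FLAGS and STRIPE_DEGRADED bits, inherits only
STRIPE_INSYNC from the head, and new WARN_ON_ONCE() checks flag any other
bits that should never be set at this point.  break_stripe_batch_list()
also gains a handle_flags argument controlling which stripes get
STRIPE_HANDLE set.

The update uses set_mask_bits(ptr, mask, bits), which clears the bits in
mask and then sets the bits in bits, roughly *ptr = (*ptr & ~mask) | bits,
done atomically by the kernel helper.  The following is a standalone,
non-atomic sketch of the new, more selective propagation; the bit values
and the composition of STRIPE_EXPAND_SYNC_FLAGS are illustrative
placeholders, not the real definitions from drivers/md/raid5.h.

    /*
     * Non-atomic, userspace sketch of the flag propagation after this
     * change.  Flag values below are placeholders for illustration only.
     */
    #include <stdio.h>

    #define BIT(n)                  (1UL << (n))

    /* Placeholder bit positions (assumed, not the real enum values). */
    #define STRIPE_INSYNC           BIT(0)
    #define STRIPE_DEGRADED         BIT(1)
    #define STRIPE_EXPANDING        BIT(2)
    #define STRIPE_SYNC_REQUESTED   BIT(3)
    #define STRIPE_PREREAD_ACTIVE   BIT(4)

    /* Assumed composition; the real macro lives in drivers/md/raid5.h. */
    #define STRIPE_EXPAND_SYNC_FLAGS (STRIPE_EXPANDING | STRIPE_SYNC_REQUESTED)

    /* Clear the bits in mask, then set the bits in bits.  The kernel
     * helper of the same name does this atomically with cmpxchg(). */
    static unsigned long set_mask_bits(unsigned long *state,
                                       unsigned long mask, unsigned long bits)
    {
            *state = (*state & ~mask) | bits;
            return *state;
    }

    int main(void)
    {
            unsigned long head_state = STRIPE_INSYNC | STRIPE_PREREAD_ACTIVE;
            unsigned long sh_state   = STRIPE_DEGRADED | STRIPE_EXPANDING;

            /*
             * A batch member keeps only its own expand/sync flags and
             * STRIPE_DEGRADED, and inherits only STRIPE_INSYNC from the
             * batch head.
             */
            set_mask_bits(&sh_state,
                          ~(STRIPE_EXPAND_SYNC_FLAGS | STRIPE_DEGRADED),
                          head_state & STRIPE_INSYNC);

            /* Prints 0x7: DEGRADED | EXPANDING kept, INSYNC inherited. */
            printf("member state after break-up: %#lx\n", sh_state);
            return 0;
    }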
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 6de2e1edd49278289c042935cc8be1db8dad85fd..1141b7f62e6e84b46a4e83358afffe54d89cf712 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3534,10 +3534,27 @@ unhash:
                                      struct stripe_head, batch_list);
                list_del_init(&sh->batch_list);
 
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
+               WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                         (1 << STRIPE_SYNCING) |
+                                         (1 << STRIPE_REPLACED) |
+                                         (1 << STRIPE_PREREAD_ACTIVE) |
+                                         (1 << STRIPE_DELAYED) |
+                                         (1 << STRIPE_BIT_DELAY) |
+                                         (1 << STRIPE_FULL_WRITE) |
+                                         (1 << STRIPE_BIOFILL_RUN) |
+                                         (1 << STRIPE_COMPUTE_RUN)  |
+                                         (1 << STRIPE_OPS_REQ_PENDING) |
+                                         (1 << STRIPE_DISCARD) |
+                                         (1 << STRIPE_BATCH_READY) |
+                                         (1 << STRIPE_BATCH_ERR) |
+                                         (1 << STRIPE_BITMAP_PENDING)));
+               WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                             (1 << STRIPE_REPLACED)));
+
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_DEGRADED)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+
                sh->check_state = head_sh->check_state;
                sh->reconstruct_state = head_sh->reconstruct_state;
                for (i = 0; i < sh->disks; i++) {
@@ -3549,7 +3566,7 @@ unhash:
                spin_lock_irq(&sh->stripe_lock);
                sh->batch_head = NULL;
                spin_unlock_irq(&sh->stripe_lock);
-               if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
+               if (sh->state & STRIPE_EXPAND_SYNC_FLAGS)
                        set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
        }
@@ -3559,7 +3576,7 @@ unhash:
        spin_unlock_irq(&head_sh->stripe_lock);
        if (wakeup_nr)
                wake_up(&conf->wait_for_overlap);
-       if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
+       if (head_sh->state & STRIPE_EXPAND_SYNC_FLAGS)
                set_bit(STRIPE_HANDLE, &head_sh->state);
 }
 
@@ -4235,7 +4252,8 @@ static int clear_batch_ready(struct stripe_head *sh)
        return 0;
 }
 
-static void break_stripe_batch_list(struct stripe_head *head_sh)
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags)
 {
        struct stripe_head *sh, *next;
        int i;
@@ -4245,11 +4263,27 @@ static void break_stripe_batch_list(struct stripe_head *head_sh)
 
                list_del_init(&sh->batch_list);
 
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                (1 << STRIPE_DEGRADED) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
+               WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                         (1 << STRIPE_SYNCING) |
+                                         (1 << STRIPE_REPLACED) |
+                                         (1 << STRIPE_PREREAD_ACTIVE) |
+                                         (1 << STRIPE_DELAYED) |
+                                         (1 << STRIPE_BIT_DELAY) |
+                                         (1 << STRIPE_FULL_WRITE) |
+                                         (1 << STRIPE_BIOFILL_RUN) |
+                                         (1 << STRIPE_COMPUTE_RUN)  |
+                                         (1 << STRIPE_OPS_REQ_PENDING) |
+                                         (1 << STRIPE_DISCARD) |
+                                         (1 << STRIPE_BATCH_READY) |
+                                         (1 << STRIPE_BATCH_ERR) |
+                                         (1 << STRIPE_BITMAP_PENDING)));
+               WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                             (1 << STRIPE_REPLACED)));
+
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_DEGRADED)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+
                sh->check_state = head_sh->check_state;
                sh->reconstruct_state = head_sh->reconstruct_state;
                for (i = 0; i < sh->disks; i++) {
@@ -4261,8 +4295,9 @@ static void break_stripe_batch_list(struct stripe_head *head_sh)
                spin_lock_irq(&sh->stripe_lock);
                sh->batch_head = NULL;
                spin_unlock_irq(&sh->stripe_lock);
-
-               set_bit(STRIPE_HANDLE, &sh->state);
+               if (handle_flags == 0 ||
+                   sh->state & handle_flags)
+                       set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
        }
        spin_lock_irq(&head_sh->stripe_lock);
@@ -4271,6 +4306,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh)
        for (i = 0; i < head_sh->disks; i++)
                if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
                        do_wakeup = 1;
+       if (head_sh->state & handle_flags)
+               set_bit(STRIPE_HANDLE, &head_sh->state);
 
        if (do_wakeup)
                wake_up(&head_sh->raid_conf->wait_for_overlap);
@@ -4299,7 +4336,7 @@ static void handle_stripe(struct stripe_head *sh)
        }
 
        if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
-               break_stripe_batch_list(sh);
+               break_stripe_batch_list(sh, 0);
 
        if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
                spin_lock(&sh->stripe_lock);
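
For reference on the new argument: with this change the STRIPE_BATCH_ERR
path above calls break_stripe_batch_list(sh, 0), i.e. every broken-out
stripe is marked STRIPE_HANDLE, whereas a caller passing a non-zero mask
would only mark the stripes whose state intersects that mask.  A minimal
sketch of that predicate (should_handle is a hypothetical name, not a
function in raid5.c):

    /* 0 means "handle unconditionally"; otherwise require an overlap. */
    static int should_handle(unsigned long state, unsigned long handle_flags)
    {
            return handle_flags == 0 || (state & handle_flags) != 0;
    }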