md/raid5: schedule_reconstruction should abort if nothing to do.
author NeilBrown <neilb@suse.de>
Mon, 4 Mar 2013 01:37:14 +0000 (12:37 +1100)
committer NeilBrown <neilb@suse.de>
Wed, 20 Mar 2013 01:16:51 +0000 (12:16 +1100)
Since commit 1ed850f356a0a422013846b5291acff08815008b
    ("md/raid5: make sure to_read and to_write never go negative")
it has been possible for handle_stripe_dirtying() to be called
when there isn't actually any work to do.
It then calls schedule_reconstruction(), which sets R5_LOCKED
on the parity block(s) even though nothing else is happening.
This then causes problems in do_release_stripe().

So add checks to schedule_reconstruction(): if it finds nothing
to do, it now simply returns without scheduling any operations.
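
In outline, the fix moves the state changes below the loop that
actually locks devices, and bails out if that loop locked nothing.
A simplified sketch of the non-expanding RCW path (the R5_UPTODATE
handling from the real code is omitted; the exact change is in the
diff below):

    for (i = disks; i--; ) {
            struct r5dev *dev = &sh->dev[i];
            if (dev->towrite) {
                    set_bit(R5_LOCKED, &dev->flags);
                    set_bit(R5_Wantdrain, &dev->flags);
                    s->locked++;
            }
    }
    if (!s->locked)
            /* False alarm, nothing to do */
            return;
    sh->reconstruct_state = reconstruct_state_drain_run;
    set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
    set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);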

This bug was introduced in v3.7, so the patch is suitable
for -stable kernels since then.

Cc: stable@vger.kernel.org (v3.7+)
Reported-by: majianpeng <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
drivers/md/raid5.c

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 35031c8b2d024294991120855b8bac3e077fce8e..5601dda1bc4021564f8b53d474cecf69facef3be 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2283,17 +2283,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
        int level = conf->level;
 
        if (rcw) {
-               /* if we are not expanding this is a proper write request, and
-                * there will be bios with new data to be drained into the
-                * stripe cache
-                */
-               if (!expand) {
-                       sh->reconstruct_state = reconstruct_state_drain_run;
-                       set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-               } else
-                       sh->reconstruct_state = reconstruct_state_run;
-
-               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
 
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
@@ -2306,6 +2295,21 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                                s->locked++;
                        }
                }
+               /* if we are not expanding this is a proper write request, and
+                * there will be bios with new data to be drained into the
+                * stripe cache
+                */
+               if (!expand) {
+                       if (!s->locked)
+                               /* False alarm, nothing to do */
+                               return;
+                       sh->reconstruct_state = reconstruct_state_drain_run;
+                       set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+               } else
+                       sh->reconstruct_state = reconstruct_state_run;
+
+               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
+
                if (s->locked + conf->max_degraded == disks)
                        if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
                                atomic_inc(&conf->pending_full_writes);
@@ -2314,11 +2318,6 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
 
-               sh->reconstruct_state = reconstruct_state_prexor_drain_run;
-               set_bit(STRIPE_OP_PREXOR, &s->ops_request);
-               set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
-               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
-
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if (i == pd_idx)
@@ -2333,6 +2332,13 @@ schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s,
                                s->locked++;
                        }
                }
+               if (!s->locked)
+                       /* False alarm - nothing to do */
+                       return;
+               sh->reconstruct_state = reconstruct_state_prexor_drain_run;
+               set_bit(STRIPE_OP_PREXOR, &s->ops_request);
+               set_bit(STRIPE_OP_BIODRAIN, &s->ops_request);
+               set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request);
        }
 
        /* keep the parity disk(s) locked while asynchronous operations