MD RAID5: Avoid accessing gendisk or queue structs when not available
author Jonathan Brassow <jbrassow@redhat.com>
Thu, 7 Mar 2013 22:22:01 +0000 (16:22 -0600)
committer NeilBrown <neilb@suse.de>
Wed, 20 Mar 2013 02:16:57 +0000 (13:16 +1100)
MD RAID5: Fix kernel oops when RAID4/5/6 is used via device-mapper

Commit a9add5d (v3.8-rc1) added blktrace calls to the RAID4/5/6 driver.
However, when device-mapper is used to create RAID4/5/6 arrays, the
mddev->gendisk and mddev->queue fields are not set up.  Therefore, calls
like trace_block_bio_remap will cause a kernel oops.  This patch makes
those calls conditional on the relevant fields being present.
(Device-mapper will call trace_block_bio_remap on its own.)
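
The fix, in rough outline (a minimal sketch of the guard used at each
call site in the hunks below, not the complete change), is to test the
field a blktrace call would dereference before making the call:

        /* dm-raid arrays leave mddev->gendisk and mddev->queue NULL */
        if (conf->mddev->gendisk)
                trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
                                      bi, disk_devt(conf->mddev->gendisk),
                                      sh->dev[i].sector);

        if (conf->mddev->queue)
                blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
                                  (unsigned long long)sh->sector, rmw);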

This patch is suitable for the 3.8.y stable kernel.

Cc: stable@vger.kernel.org (v3.8+)
Signed-off-by: Jonathan Brassow <jbrassow@redhat.com>
Signed-off-by: NeilBrown <neilb@suse.de>
drivers/md/raid5.c

index 5601dda1bc4021564f8b53d474cecf69facef3be..52ba88a106684a45dbc5f57e3ce2f14ad06251d5 100644
@@ -674,9 +674,11 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        bi->bi_next = NULL;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
-                       trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
-                                             bi, disk_devt(conf->mddev->gendisk),
-                                             sh->dev[i].sector);
+
+                       if (conf->mddev->gendisk)
+                               trace_block_bio_remap(bdev_get_queue(bi->bi_bdev),
+                                                     bi, disk_devt(conf->mddev->gendisk),
+                                                     sh->dev[i].sector);
                        generic_make_request(bi);
                }
                if (rrdev) {
@@ -704,9 +706,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
                        rbi->bi_next = NULL;
-                       trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
-                                             rbi, disk_devt(conf->mddev->gendisk),
-                                             sh->dev[i].sector);
+                       if (conf->mddev->gendisk)
+                               trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
+                                                     rbi, disk_devt(conf->mddev->gendisk),
+                                                     sh->dev[i].sector);
                        generic_make_request(rbi);
                }
                if (!rdev && !rrdev) {
@@ -2835,8 +2838,10 @@ static void handle_stripe_dirtying(struct r5conf *conf,
        set_bit(STRIPE_HANDLE, &sh->state);
        if (rmw < rcw && rmw > 0) {
                /* prefer read-modify-write, but need to get some data */
-               blk_add_trace_msg(conf->mddev->queue, "raid5 rmw %llu %d",
-                                 (unsigned long long)sh->sector, rmw);
+               if (conf->mddev->queue)
+                       blk_add_trace_msg(conf->mddev->queue,
+                                         "raid5 rmw %llu %d",
+                                         (unsigned long long)sh->sector, rmw);
                for (i = disks; i--; ) {
                        struct r5dev *dev = &sh->dev[i];
                        if ((dev->towrite || i == sh->pd_idx) &&
@@ -2886,7 +2891,7 @@ static void handle_stripe_dirtying(struct r5conf *conf,
                                }
                        }
                }
-               if (rcw)
+               if (rcw && conf->mddev->queue)
                        blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
                                          (unsigned long long)sh->sector,
                                          rcw, qread, test_bit(STRIPE_DELAYED, &sh->state));
@@ -3993,9 +3998,10 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                atomic_inc(&conf->active_aligned_reads);
                spin_unlock_irq(&conf->device_lock);
 
-               trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
-                                     align_bi, disk_devt(mddev->gendisk),
-                                     raid_bio->bi_sector);
+               if (mddev->gendisk)
+                       trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
+                                             align_bi, disk_devt(mddev->gendisk),
+                                             raid_bio->bi_sector);
                generic_make_request(align_bi);
                return 1;
        } else {
@@ -4089,7 +4095,8 @@ static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule)
                }
                spin_unlock_irq(&conf->device_lock);
        }
-       trace_block_unplug(mddev->queue, cnt, !from_schedule);
+       if (mddev->queue)
+               trace_block_unplug(mddev->queue, cnt, !from_schedule);
        kfree(cb);
 }