md/raid10: submit IO from originating thread instead of md thread.
author    NeilBrown <neilb@suse.de>
          Thu, 11 Oct 2012 02:32:13 +0000 (13:32 +1100)
committer NeilBrown <neilb@suse.de>
          Thu, 11 Oct 2012 02:32:13 +0000 (13:32 +1100)
Queuing writes to the md thread means that all requests are funnelled
through a single processor, which may not be able to keep up with very
high request rates.

So use the plugging infrastructure to submit all requests on unplug,
from the context of the thread that originated them.  If a 'schedule'
is needed (the unplug fires because the submitting task is about to
sleep, so it must not do the submission work itself), we fall back on
the old approach of handing the requests to the md thread for it to
handle.
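For context, this is roughly what the plugging window looks like from
the submitter's side (a minimal sketch, not part of this patch; it
assumes the blk_start_plug()/blk_finish_plug() API of this kernel
series, and submit_one_write() is a hypothetical stand-in for a caller
issuing writes to the array):

	/* Sketch only.  Bios batched via blk_check_plugged() inside the
	 * window are submitted when the plug is released, still in the
	 * context of the task that issued the writes. */
	static void example_batched_writes(void)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);
		for (i = 0; i < 16; i++)
			submit_one_write(i);	/* ends up in make_request() */
		blk_finish_plug(&plug);		/* raid10_unplug() fires here with
						 * from_schedule == false; if the
						 * task schedules first, it runs
						 * with from_schedule == true */
	}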

This is nearly identical to a recent patch which made the same change
for RAID1.

Signed-off-by: NeilBrown <neilb@suse.de>
drivers/md/raid10.c

index f92e0ed59be02550d5c2f3dbd984fd2d857f48db..05dc96a950d505de4b8d0de5aa8820634c5f5e31 100644
@@ -1055,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
                return rdev->new_data_offset;
 }
 
+struct raid10_plug_cb {
+       struct blk_plug_cb      cb;
+       struct bio_list         pending;
+       int                     pending_cnt;
+};
+
+static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+       struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+                                                  cb);
+       struct mddev *mddev = plug->cb.data;
+       struct r10conf *conf = mddev->private;
+       struct bio *bio;
+
+       if (from_schedule) {
+               spin_lock_irq(&conf->device_lock);
+               bio_list_merge(&conf->pending_bio_list, &plug->pending);
+               conf->pending_count += plug->pending_cnt;
+               spin_unlock_irq(&conf->device_lock);
+               md_wakeup_thread(mddev->thread);
+               kfree(plug);
+               return;
+       }
+
+       /* we aren't scheduling, so we can do the write-out directly. */
+       bio = bio_list_get(&plug->pending);
+       bitmap_unplug(mddev->bitmap);
+       wake_up(&conf->wait_barrier);
+
+       while (bio) { /* submit pending writes */
+               struct bio *next = bio->bi_next;
+               bio->bi_next = NULL;
+               generic_make_request(bio);
+               bio = next;
+       }
+       kfree(plug);
+}
+
 static void make_request(struct mddev *mddev, struct bio * bio)
 {
        struct r10conf *conf = mddev->private;
@@ -1070,6 +1108,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                                          & (REQ_DISCARD | REQ_SECURE));
        unsigned long flags;
        struct md_rdev *blocked_rdev;
+       struct blk_plug_cb *cb;
+       struct raid10_plug_cb *plug = NULL;
        int sectors_handled;
        int max_sectors;
        int sectors;
@@ -1421,11 +1461,22 @@ retry_write:
                mbio->bi_private = r10_bio;
 
                atomic_inc(&r10_bio->remaining);
+
+               cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+               if (cb)
+                       plug = container_of(cb, struct raid10_plug_cb, cb);
+               else
+                       plug = NULL;
                spin_lock_irqsave(&conf->device_lock, flags);
-               bio_list_add(&conf->pending_bio_list, mbio);
-               conf->pending_count++;
+               if (plug) {
+                       bio_list_add(&plug->pending, mbio);
+                       plug->pending_cnt++;
+               } else {
+                       bio_list_add(&conf->pending_bio_list, mbio);
+                       conf->pending_count++;
+               }
                spin_unlock_irqrestore(&conf->device_lock, flags);
-               if (!mddev_check_plugged(mddev))
+               if (!plug)
                        md_wakeup_thread(mddev->thread);
 
                if (!r10_bio->devs[i].repl_bio)