Add UNPLUG traces to all appropriate places
author    Alan D. Brunelle <Alan.Brunelle@hp.com>
          Wed, 7 Nov 2007 19:26:56 +0000 (14:26 -0500)
committer Jens Axboe <jens.axboe@oracle.com>
          Fri, 9 Nov 2007 12:41:32 +0000 (13:41 +0100)
Added a blk_unplug() interface so that all unplug invocations result
in a generated blktrace UNPLUG event.

Signed-off-by: Alan D. Brunelle <Alan.Brunelle@hp.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
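
For illustration, a minimal sketch of the conversion pattern applied to the
drivers in this patch; the example_flush_queue() wrapper below is hypothetical
and not part of the change itself:

#include <linux/blkdev.h>

/* Hypothetical wrapper, for illustration only. */
static void example_flush_queue(struct request_queue *q)
{
        /*
         * Callers used to open-code the check:
         *
         *      if (q->unplug_fn)
         *              q->unplug_fn(q);
         *
         * They now call blk_unplug(), which performs the same NULL check
         * and additionally emits a BLK_TA_UNPLUG_IO blktrace event carrying
         * the queue's current READ + WRITE request counts before invoking
         * ->unplug_fn().
         */
        blk_unplug(q);
}
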
block/ll_rw_blk.c
drivers/md/bitmap.c
drivers/md/dm-table.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
include/linux/blkdev.h

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d489915fd22841c65c76083e401ac7ed7b60d77..3b927be038501f178577da45ebb447c6dc27b90f 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1621,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
        struct request_queue *q = bdi->unplug_io_data;
 
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
-       if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
-               q->unplug_fn(q);
-       }
+       blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1653,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
        kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+       /*
+        * devices don't necessarily have an ->unplug_fn defined
+        */
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
+               q->unplug_fn(q);
+       }
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7c426d07a555c0426855571552289c6ed9fcaaaa..1b1ef3130e6e8cbbe32ea6d659882b92a408d59e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1207,8 +1207,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
                        prepare_to_wait(&bitmap->overflow_wait, &__wait,
                                        TASK_UNINTERRUPTIBLE);
                        spin_unlock_irq(&bitmap->lock);
-                       bitmap->mddev->queue
-                               ->unplug_fn(bitmap->mddev->queue);
+                       blk_unplug(bitmap->mddev->queue);
                        schedule();
                        finish_wait(&bitmap->overflow_wait, &__wait);
                        continue;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5a7eb650181e140b85d0ba8367e9f82c0e492923..e298d8d11f24bb1892d959833dbd84cf4f9e16e6 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1000,8 +1000,7 @@ void dm_table_unplug_all(struct dm_table *t)
                struct dm_dev *dd = list_entry(d, struct dm_dev, list);
                struct request_queue *q = bdev_get_queue(dd->bdev);
 
-               if (q->unplug_fn)
-                       q->unplug_fn(q);
+               blk_unplug(q);
        }
 }
 
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 56a11f6c127b888f0518b8da9c2dd838d70fc74c..3dac1cfb81896655f953939a447bb731e9539747 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,8 +87,7 @@ static void linear_unplug(struct request_queue *q)
 
        for (i=0; i < mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-               if (r_queue->unplug_fn)
-                       r_queue->unplug_fn(r_queue);
+               blk_unplug(r_queue);
        }
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 808cd95494563d260456733e0d1e9ba80cbee21e..cef9ebd5a04652d4c3e9f293448c45425531d775 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5445,7 +5445,7 @@ void md_do_sync(mddev_t *mddev)
                 * about not overloading the IO subsystem. (things like an
                 * e2fsck being done on the RAID array should execute fast)
                 */
-               mddev->queue->unplug_fn(mddev->queue);
+               blk_unplug(mddev->queue);
                cond_resched();
 
                currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
         * this also signals 'finished resyncing' to md_stop
         */
  out:
-       mddev->queue->unplug_fn(mddev->queue);
+       blk_unplug(mddev->queue);
 
        wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index b35731cceac671ee647e108174a44cacfa82c381..eb631ebed6860b3f19ae8c3161a10e509d448d3e 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,8 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
 
-                       if (r_queue->unplug_fn)
-                               r_queue->unplug_fn(r_queue);
+                       blk_unplug(r_queue);
 
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c111105fc2dc66ae162cb44039b27ccd100ad352..f8e591708d1fa7768fda354aa8da006b1f26d549 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -35,8 +35,7 @@ static void raid0_unplug(struct request_queue *q)
        for (i=0; i<mddev->raid_disks; i++) {
                struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-               if (r_queue->unplug_fn)
-                       r_queue->unplug_fn(r_queue);
+               blk_unplug(r_queue);
        }
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85478d6a9c1af645dfdfdb824aa1b6c7286e9daf..4a69c416e045c97f24ef4582c9d3b9f83588ba15 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -549,8 +549,7 @@ static void unplug_slaves(mddev_t *mddev)
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
 
-                       if (r_queue->unplug_fn)
-                               r_queue->unplug_fn(r_queue);
+                       blk_unplug(r_queue);
 
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fc6607acb6e4734f12c4dbed23bf9e82ab5816ec..5cdcc938620050b39fb30593565847859d6ad1a4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -593,8 +593,7 @@ static void unplug_slaves(mddev_t *mddev)
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
 
-                       if (r_queue->unplug_fn)
-                               r_queue->unplug_fn(r_queue);
+                       blk_unplug(r_queue);
 
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 82af3465a900883061c69d6f2b52549fdb3e6e02..1cfc984cc7b7158c5c49b11f209ec4c606f1508e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3186,8 +3186,7 @@ static void unplug_slaves(mddev_t *mddev)
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
 
-                       if (r_queue->unplug_fn)
-                               r_queue->unplug_fn(r_queue);
+                       blk_unplug(r_queue);
 
                        rdev_dec_pending(rdev, mddev);
                        rcu_read_lock();
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8396db24d01902d9502016776bc9685c3ccea9f1..d18ee67b40f8198118e93336aee272324d9c30db 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -697,6 +697,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {