block: fixup block IO unplug trace call
author Jens Axboe <jaxboe@fusionio.com>
Tue, 12 Apr 2011 08:12:19 +0000 (10:12 +0200)
committer Jens Axboe <jaxboe@fusionio.com>
Tue, 12 Apr 2011 08:12:19 +0000 (10:12 +0200)
The block IO unplug trace call was removed with the on-stack plugging; re-add it
and track the depth of requests added when flushing the plug.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
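
For context, a minimal sketch (not part of this patch) of a submitter batching IO
under an on-stack plug. blk_start_plug(), blk_finish_plug() and submit_bio() are
the real APIs of this kernel era; the submit_batch() helper and its bios[]/nr
parameters are made up for illustration. The unplug trace added below fires when
the plug is flushed, reporting how many requests were queued to each device.

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Hypothetical caller; only the blk_*_plug()/submit_bio() calls are real. */
static void submit_batch(struct bio **bios, unsigned int nr)
{
	struct blk_plug plug;
	unsigned int i;

	blk_start_plug(&plug);		/* requests now collect on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);
	/*
	 * Flushing the plug sorts the queued requests by request_queue and
	 * calls queue_unplugged() once per queue, which with this patch
	 * emits block_unplug_io with the per-queue request count ("depth").
	 */
	blk_finish_plug(&plug);
}
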
block/blk-core.c
include/trace/events/block.h
kernel/trace/blktrace.c

index eeaca0998df57aa7c33786edcb5b49b48804ef51..d20ce1e849c82694c06c05d47dbfa405f5ca01eb 100644 (file)
@@ -2668,12 +2668,19 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
        return !(rqa->q <= rqb->q);
 }
 
+static void queue_unplugged(struct request_queue *q, unsigned int depth)
+{
+       trace_block_unplug_io(q, depth);
+       __blk_run_queue(q, false);
+}
+
 static void flush_plug_list(struct blk_plug *plug)
 {
        struct request_queue *q;
        unsigned long flags;
        struct request *rq;
        LIST_HEAD(list);
+       unsigned int depth;
 
        BUG_ON(plug->magic != PLUG_MAGIC);
 
@@ -2688,6 +2695,7 @@ static void flush_plug_list(struct blk_plug *plug)
        }
 
        q = NULL;
+       depth = 0;
        local_irq_save(flags);
        while (!list_empty(&list)) {
                rq = list_entry_rq(list.next);
@@ -2696,10 +2704,11 @@ static void flush_plug_list(struct blk_plug *plug)
                BUG_ON(!rq->q);
                if (rq->q != q) {
                        if (q) {
-                               __blk_run_queue(q, false);
+                               queue_unplugged(q, depth);
                                spin_unlock(q->queue_lock);
                        }
                        q = rq->q;
+                       depth = 0;
                        spin_lock(q->queue_lock);
                }
                rq->cmd_flags &= ~REQ_ON_PLUG;
@@ -2711,10 +2720,12 @@ static void flush_plug_list(struct blk_plug *plug)
                        __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
                else
                        __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
+
+               depth++;
        }
 
        if (q) {
-               __blk_run_queue(q, false);
+               queue_unplugged(q, depth);
                spin_unlock(q->queue_lock);
        }
 
index 43a985390bb6e528264b34562c33ba51705ba56b..006e60b58306555c87e7513e2cfe0a23ce452bf9 100644 (file)
@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-       TP_PROTO(struct request_queue *q),
+       TP_PROTO(struct request_queue *q, unsigned int depth),
 
-       TP_ARGS(q),
+       TP_ARGS(q, depth),
 
        TP_STRUCT__entry(
                __field( int,           nr_rq                   )
@@ -411,7 +411,7 @@ DECLARE_EVENT_CLASS(block_unplug,
        ),
 
        TP_fast_assign(
-               __entry->nr_rq  = q->rq.count[READ] + q->rq.count[WRITE];
+               __entry->nr_rq = depth;
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
 
@@ -421,15 +421,16 @@ DECLARE_EVENT_CLASS(block_unplug,
 /**
  * block_unplug_io - release of operations requests in request queue
  * @q: request queue to unplug
+ * @depth: number of requests just added to the queue
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
 DEFINE_EVENT(block_unplug, block_unplug_io,
 
-       TP_PROTO(struct request_queue *q),
+       TP_PROTO(struct request_queue *q, unsigned int depth),
 
-       TP_ARGS(q)
+       TP_ARGS(q, depth)
 );
 
 /**
index 824708cbfb7b88083ac16d591e09ff121b785bad..3e3970d53d144609af37e47b634faf77692f5396 100644 (file)
@@ -850,13 +850,13 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
+                                   unsigned int depth)
 {
        struct blk_trace *bt = q->blk_trace;
 
        if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
+               __be64 rpdu = cpu_to_be64(depth);
 
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
                                sizeof(rpdu), &rpdu);
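
Because TP_PROTO defines the probe signature, every registered consumer has to be
updated together with the event class; the blktrace hook above is the in-tree one.
A rough sketch of an out-of-tree consumer registering a matching probe follows;
my_unplug_probe() and my_probe_init() are hypothetical names, while
register_trace_block_unplug_io() is generated by the tracepoint machinery from
DEFINE_EVENT.

#include <linux/kernel.h>
#include <linux/init.h>
#include <trace/events/block.h>

/* Hypothetical probe; its signature must match the new two-argument TP_PROTO. */
static void my_unplug_probe(void *ignore, struct request_queue *q,
			    unsigned int depth)
{
	pr_info("unplug: %u requests queued\n", depth);
}

static int __init my_probe_init(void)
{
	/* data cookie is unused here, as in blk_register_tracepoints() */
	return register_trace_block_unplug_io(my_unplug_probe, NULL);
}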