This reverts commit 3a366e614d0837d9fc23f78cdb1a1186ebc3387f.

Wanlong Gao reports that it causes a kernel panic on his machine several
minutes after boot. Reverting it removes the panic.

Jens says:
 "It's not quite clear why that is yet, so I think we should just revert
  the commit for 3.9 final (which I'm assuming is pretty close).

  The wifi is crap at the LSF hotel, so sending this email instead of
  queueing up a revert and pull request."
Reported-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
Requested-by: Jens Axboe <axboe@kernel.dk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
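For context, the hunks below restore the convention that bio-based drivers
(dm, raid5) emit the completion tracepoint themselves, passing the owning
request_queue explicitly before ending the bio, and re-export the tracepoint
so those modules can call it. A minimal sketch of the restored call pattern,
assuming a bio-based driver holding a live bio with a backing device
(my_complete_bio is a hypothetical helper, not a function in the tree):

    /* Assumes <linux/bio.h>, <linux/blkdev.h>, <trace/events/block.h>. */
    static void my_complete_bio(struct bio *bio, int error)
    {
            /* Emit the trace event against the queue that owns the bio... */
            trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, error);
            /* ...then complete the bio (3.9-era two-argument bio_endio). */
            bio_endio(bio, error);
    }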
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
+EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
+ trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
+ trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
+ raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
if ( rw == WRITE )
md_write_end(mddev);
+ trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
+ bi, 0);
bio_endio(bi, 0);
}
}
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0)
+ if (remaining == 0) {
+ trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
+ raid_bio, 0);
bio_endio(raid_bio, 0);
+ }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
error = -EIO;
- trace_block_bio_complete(bio, error);
-
if (bio->bi_end_io)
bio->bi_end_io(bio, error);
}
struct blk_trace {
int trace_state;
- bool rq_based;
struct rchan *rchan;
unsigned long __percpu *sequence;
unsigned char __percpu *msg_data;
/**
* block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
* @bio: block operation completed
* @error: io error value
*
*/
TRACE_EVENT(block_bio_complete,
- TP_PROTO(struct bio *bio, int error),
+ TP_PROTO(struct request_queue *q, struct bio *bio, int error),
- TP_ARGS(bio, error),
+ TP_ARGS(q, bio, error),
TP_STRUCT__entry(
__field( dev_t, dev )
),
TP_fast_assign(
- __entry->dev = bio->bi_bdev ?
- bio->bi_bdev->bd_dev : 0;
+ __entry->dev = bio->bi_bdev->bd_dev;
__entry->sector = bio->bi_sector;
__entry->nr_sector = bio->bi_size >> 9;
__entry->error = error;
struct request_queue *q,
struct request *rq)
{
- struct blk_trace *bt = q->blk_trace;
-
- /* if control ever passes through here, it's a request based driver */
- if (unlikely(bt && !bt->rq_based))
- bt->rq_based = true;
-
blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}
blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}
-static void blk_add_trace_bio_complete(void *ignore, struct bio *bio, int error)
+static void blk_add_trace_bio_complete(void *ignore,
+ struct request_queue *q, struct bio *bio,
+ int error)
{
- struct request_queue *q;
- struct blk_trace *bt;
-
- if (!bio->bi_bdev)
- return;
-
- q = bdev_get_queue(bio->bi_bdev);
- bt = q->blk_trace;
-
- /*
- * Request based drivers will generate both rq and bio completions.
- * Ignore bio ones.
- */
- if (likely(!bt) || bt->rq_based)
- return;
-
blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}
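The probe's prototype has to change together with TP_PROTO because blktrace
attaches it through the registration helper generated by the TRACE_EVENT
macro. A sketch of that hookup, assuming the 3.9-era blk_register_tracepoints()
in kernel/trace/blktrace.c (error handling elided):

    int ret;

    /* Probe signature is (void *data, <TP_PROTO args>), so it must now be
     * (void *, struct request_queue *, struct bio *, int). */
    ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
    WARN_ON(ret);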