bio->bi_bdev = bdev->bd_contains;
trace_block_remap(bdev_get_queue(bio->bi_bdev), bio,
- bdev->bd_dev, bio->bi_sector,
+ bdev->bd_dev,
bio->bi_sector - p->start_sect);
}
}
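/*
 * Reviewer note on the blk_partition_remap() hunk above: bio->bi_sector has
 * already had p->start_sect added to it at this point, so it holds the
 * whole-disk target sector and "bio->bi_sector - p->start_sect" recovers the
 * original partition-relative sector. Only that from-sector still needs to be
 * passed; the probe reads the target sector straight from the bio.
 */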
goto end_io;
if (old_sector != -1)
- trace_block_remap(q, bio, old_dev, bio->bi_sector,
- old_sector);
+ trace_block_remap(q, bio, old_dev, old_sector);
trace_block_bio_queue(q, bio);
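/*
 * Reviewer note on the hunk above: old_dev/old_sector are saved further down
 * in the __generic_make_request() loop, before ->make_request_fn() gets a
 * chance to remap the bio, so they describe the "from" side of the remap. The
 * current bio->bi_sector is the "to" side and is available to the probe
 * through the bio itself, which is why it is dropped from the call.
 */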
/* the bio has been remapped so dispatch it */
trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
- tio->io->bio->bi_bdev->bd_dev,
- clone->bi_sector, sector);
+ tio->io->bio->bi_bdev->bd_dev, sector);
generic_make_request(clone);
} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
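/*
 * Reviewer note on the dm.c hunk above: in __map_bio(), "sector" is
 * clone->bi_sector saved before ti->type->map() relocated the clone, so it is
 * the from-sector. The clone's new location is read inside the probe via
 * clone->bi_sector, which is why only "sector" is still passed here.
 */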
DECLARE_TRACE(block_remap,
TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t to, sector_t from),
- TP_ARGS(q, bio, dev, to, from));
+ sector_t from),
+ TP_ARGS(q, bio, dev, from));
#endif
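/*
 * Reference sketch (reviewer annotation, not part of this patch):
 * DECLARE_TRACE() also generates register_trace_block_remap() and
 * unregister_trace_block_remap(), so any probe attached to this tracepoint
 * must match the narrower prototype, e.g.:
 *
 *	void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 *				 dev_t dev, sector_t from);
 *
 *	register_trace_block_remap(blk_add_trace_remap);
 */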
/**
* blk_add_trace_remap - Add a trace for a remap operation
* @q: queue the io is for
* @bio: the source bio
* @dev: target device
- * @to: target sector
* @from: source sector
*
* Description:
*     Device mapper and raid targets remap bios to other devices or
*     sectors; add a trace for that remap action.
*
**/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
- dev_t dev, sector_t to, sector_t from)
+ dev_t dev, sector_t from)
{
struct blk_trace *bt = q->blk_trace;
struct blk_io_trace_remap r;

if (likely(!bt))
	return;

r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev);
r.sector_from = cpu_to_be64(from);
- __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP,
- !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+ __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
+ BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
+ sizeof(r), &r);
}
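/*
 * Reviewer note: the BLK_TA_REMAP record still carries both sides of the
 * remap. Assuming the blk_io_trace_remap layout from blktrace_api.h
 * (device_from, device_to, sector_from), the payload holds the source device
 * and sector supplied by the caller, while the sector in the main trace entry
 * now comes from bio->bi_sector, i.e. the remapped target location, instead
 * of duplicating the from-sector.
 */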
/**