static bool scsi_end_request(struct request *req, int error,
unsigned int bytes, unsigned int bidi_bytes)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev = cmd->device;
struct request_queue *q = sdev->request_queue;
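
Every hunk in this conversion leans on the same helper, so it is worth spelling out what it does: blk_mq_rq_to_pdu() returns the driver-private data area (the "PDU") that the block layer allocates immediately behind struct request itself. In kernels of this era the helper is, in its entirety:

	/* include/linux/blk-mq.h: the PDU sits directly after the request */
	static inline void *blk_mq_rq_to_pdu(struct request *rq)
	{
		return rq + 1;
	}

The conversion therefore replaces a pointer that had to be stashed in req->special during prep with plain pointer arithmetic into memory that lives exactly as long as the request does.
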
static int scsi_setup_scsi_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
/*
* Passthrough requests may transfer data, in which case they must
*/
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
if (unlikely(sdev->handler && sdev->handler->prep_fn)) {
int ret = sdev->handler->prep_fn(sdev, req);
static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
if (!blk_rq_bytes(req))
cmd->sc_data_direction = DMA_NONE;
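
For the embedded command to exist at all, the midlayer must tell the block layer how much private space to reserve behind each request. A simplified sketch of that setup, modeled on this era's scsi_mq_setup_tags() (the sizing arithmetic is illustrative, not a verbatim quote from the patch):

	/* Sketch: reserve room behind each struct request for the command,
	 * the LLD's per-command data, and an inline scatterlist pool.
	 */
	unsigned int sgl_size = min_t(unsigned int, shost->sg_tablesize,
				      SG_CHUNK_SIZE) * sizeof(struct scatterlist);

	shost->tag_set.cmd_size = sizeof(struct scsi_cmnd) +
				  shost->hostt->cmd_size + sgl_size;
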
static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
- scsi_uninit_cmd(req->special);
+ scsi_uninit_cmd(blk_mq_rq_to_pdu(req));
}
/*
*/
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
- struct scsi_cmnd *cmd = req->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct scsi_device *sdev;
struct scsi_target *starget;
struct Scsi_Host *shost;
static void scsi_softirq_done(struct request *rq)
{
- struct scsi_cmnd *cmd = rq->special;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
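
A note on the context line above: wait_for is the command's total time budget across retries. With allowed = 5 retries and a 30-second timeout, for example, it comes to 180 seconds. In this function it feeds a late-completion check that looks roughly like the following (paraphrased, not a hunk from this patch):

	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for / HZ);
		disposition = SUCCESS;
	}

The final hunk below sits in scsi_request_fn(), the legacy (non-mq) dispatch loop. blk_mq_rq_to_pdu() is valid there too: a companion block-layer change taught legacy queues to allocate the same extra per-request space, so the command sits behind the request on both paths.
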
blk_start_request(req);
spin_unlock_irq(q->queue_lock);
- cmd = req->special;
- if (unlikely(cmd == NULL)) {
+ cmd = blk_mq_rq_to_pdu(req);
- printk(KERN_CRIT "impossible request in %s.\n"
- "please mail a stack trace to "
- "linux-scsi@vger.kernel.org\n",