}
}
-static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
{
struct mmc_blk_data *md = mq->blkdata;
struct mmc_card *card = md->queue.card;
int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
- struct request *req;
+ struct request *old_req;
struct mmc_async_req *new_areq;
struct mmc_async_req *old_areq;
- if (!rqc && !mq->mqrq_prev->req)
+ if (!new_req && !mq->mqrq_prev->req)
return;
do {
- if (rqc) {
+ if (new_req) {
/*
* When 4KB native sector is enabled, only 8 blocks
* multiple read or write is allowed
*/
if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(rqc), 8)) {
+ !IS_ALIGNED(blk_rq_sectors(new_req), 8)) {
pr_err("%s: Transfer size is not 4KB sector size aligned\n",
- rqc->rq_disk->disk_name);
- mmc_blk_rw_cmd_abort(card, rqc);
+ new_req->rq_disk->disk_name);
+ mmc_blk_rw_cmd_abort(card, new_req);
return;
}
/*
 * An asynchronous request has been completed and we can
 * use the result from this async request in the coming
 * requests.
 */
mq_rq = container_of(old_areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
- req = mq_rq->req;
- type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ old_req = mq_rq->req;
+ type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
/*
 * A block was successfully transferred.
 */
mmc_blk_reset_success(md, type);
- ret = blk_end_request(req, 0,
+ ret = blk_end_request(old_req, 0,
brq->data.bytes_xfered);
/*
 * If the blk_end_request function returns non-zero even
 * though all data has been transferred and no errors
 * were returned by the host controller, it's a bug.
 */
if (status == MMC_BLK_SUCCESS && ret) {
pr_err("%s BUG rq_tot %d d_xfer %d\n",
- __func__, blk_rq_bytes(req),
+ __func__, blk_rq_bytes(old_req),
brq->data.bytes_xfered);
- mmc_blk_rw_cmd_abort(card, req);
+ mmc_blk_rw_cmd_abort(card, old_req);
return;
}
break;
case MMC_BLK_CMD_ERR:
- ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+ ret = mmc_blk_cmd_err(md, card, brq, old_req, ret);
if (mmc_blk_reset(md, card->host, type)) {
- mmc_blk_rw_cmd_abort(card, req);
- mmc_blk_rw_start_new(mq, card, rqc);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
}
if (!ret) {
- mmc_blk_rw_start_new(mq, card, rqc);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
}
break;
case MMC_BLK_ABORT:
if (!mmc_blk_reset(md, card->host, type))
break;
- mmc_blk_rw_cmd_abort(card, req);
- mmc_blk_rw_start_new(mq, card, rqc);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
case MMC_BLK_DATA_ERR: {
int err;
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
if (err == -ENODEV) {
- mmc_blk_rw_cmd_abort(card, req);
- mmc_blk_rw_start_new(mq, card, rqc);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
}
/* Fall through */
}
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
/* Redo read one sector at a time */
pr_warn("%s: retrying using single block read\n",
- req->rq_disk->disk_name);
+ old_req->rq_disk->disk_name);
disable_multi = 1;
break;
}
/*
 * After an error, we redo I/O one sector at a
 * time, so we only reach here after trying to
 * read a single sector.
 */
- ret = blk_end_request(req, -EIO,
+ ret = blk_end_request(old_req, -EIO,
brq->data.blksz);
if (!ret) {
- mmc_blk_rw_start_new(mq, card, rqc);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
}
break;
case MMC_BLK_NOMEDIUM:
- mmc_blk_rw_cmd_abort(card, req);
- mmc_blk_rw_start_new(mq, card, rqc);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
default:
pr_err("%s: Unhandled return value (%d)",
- req->rq_disk->disk_name, status);
- mmc_blk_rw_cmd_abort(card, req);
- mmc_blk_rw_start_new(mq, card, rqc);
+ old_req->rq_disk->disk_name, status);
+ mmc_blk_rw_cmd_abort(card, old_req);
+ mmc_blk_rw_start_new(mq, card, new_req);
return;
}