nvme: untangle 0 and BLK_MQ_RQ_QUEUE_OK
author Omar Sandoval <osandov@fb.com>
Tue, 15 Nov 2016 19:11:58 +0000 (11:11 -0800)
committer Jens Axboe <axboe@fb.com>
Tue, 15 Nov 2016 19:50:11 +0000 (12:50 -0700)
Let's not depend on any of the BLK_MQ_RQ_QUEUE_* constants having
specific values. No functional change.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
drivers/nvme/host/core.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/loop.c

index 53584d21c805222a5466a5777d44846e9abae8cd..e54bb10ead9cf9eeb4331c3857bda5122a404c27 100644 (file)
@@ -269,7 +269,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
         */
        req->__data_len = nr_bytes;
 
-       return 0;
+       return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -317,7 +317,7 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
 int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                struct nvme_command *cmd)
 {
-       int ret = 0;
+       int ret = BLK_MQ_RQ_QUEUE_OK;
 
        if (req->cmd_type == REQ_TYPE_DRV_PRIV)
                memcpy(cmd, nvme_req(req)->cmd, sizeof(*cmd));
index 51d13d5ec7a84b613a351944a045389d30387b1f..d58f8e4e2c06f2a2f1bb55d15751f71192940951 100644 (file)
@@ -328,7 +328,7 @@ static int nvme_init_iod(struct request *rq, unsigned size,
                rq->retries = 0;
                rq->rq_flags |= RQF_DONTPREP;
        }
-       return 0;
+       return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -598,17 +598,17 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        map_len = nvme_map_len(req);
        ret = nvme_init_iod(req, map_len, dev);
-       if (ret)
+       if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;
 
        ret = nvme_setup_cmd(ns, req, &cmnd);
-       if (ret)
+       if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out;
 
        if (req->nr_phys_segments)
                ret = nvme_map_data(dev, req, map_len, &cmnd);
 
-       if (ret)
+       if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out;
 
        cmnd.common.command_id = req->tag;
index c4700efc03906dabeccf44a3cae0060803fcddb0..ff1b340606b277924ff58e28303324c7b7bf4742 100644 (file)
@@ -1395,7 +1395,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                        sizeof(struct nvme_command), DMA_TO_DEVICE);
 
        ret = nvme_setup_cmd(ns, rq, c);
-       if (ret)
+       if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;
 
        c->common.command_id = rq->tag;
index 26aa3a5afb0dbf0b79a13cf606930af9df49479d..be56d0567155a204a57390c9bc06fa2d1fb74cf3 100644 (file)
@@ -169,7 +169,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        int ret;
 
        ret = nvme_setup_cmd(ns, req, &iod->cmd);
-       if (ret)
+       if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;
 
        iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,7 +179,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                nvme_cleanup_cmd(req);
                blk_mq_start_request(req);
                nvme_loop_queue_response(&iod->req);
-               return 0;
+               return BLK_MQ_RQ_QUEUE_OK;
        }
 
        if (blk_rq_bytes(req)) {
@@ -198,7 +198,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
        blk_mq_start_request(req);
 
        schedule_work(&iod->work);
-       return 0;
+       return BLK_MQ_RQ_QUEUE_OK;
 }
 
 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)