	return true;
}
+
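+/*
+ * Allocate, for each of the two request slots, a single-entry sg used to
+ * map the bounce buffer itself and a bounce_sg table with one entry per
+ * 512-byte block of the bounce buffer.  On failure, anything already
+ * allocated is left for the caller's error path to free.
+ */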
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+				      unsigned int bouncesz)
+{
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+	int ret;
+
+	mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+	if (ret)
+		return ret;
+
+	mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+	if (ret)
+		return ret;
+
+	mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+	if (ret)
+		return ret;
+
+	mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+
+	return ret;
+}
#endif
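
Note the error-reporting convention the new helper leans on: mmc_alloc_sg() returns the scatterlist and signals failure through its int out-parameter, which is why each call is followed by a test of ret rather than an IS_ERR() check. For reference, the pre-existing helper in queue.c has roughly this shape (a sketch for orientation, not part of this diff):

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	/* one array entry per segment; the caller kfree()s it on teardown */
	sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}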

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
+	bool bounce = false;
	int ret;
	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
			blk_queue_max_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

-			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_cur->bounce_sg =
-				mmc_alloc_sg(bouncesz / 512, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_prev->bounce_sg =
-				mmc_alloc_sg(bouncesz / 512, &ret);
+			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
			if (ret)
				goto cleanup_queue;
+			bounce = true;
		}
	}
#endif
-	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+	if (!bounce) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_hw_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
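
One thing worth noting about the refactor: mmc_queue_alloc_bounce_sgs() returns on the first failure, so it can leave a subset of the four scatterlists allocated. Like the open-coded sequence it replaces, it relies on the caller's cleanup_queue unwind to free whatever was set up; that is safe because kfree(NULL) is a no-op and the mqrq fields start out zeroed (the structure containing the queue is allocated with kzalloc in mmc_block). A hypothetical sketch of such an unwind, for orientation only; the real error labels in queue.c are split more finely and also release the bounce buffers:

cleanup_queue:
	/* hypothetical unwind; kfree(NULL) is a no-op, so a partial
	 * allocation from mmc_queue_alloc_bounce_sgs() is handled too */
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;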