blk_queue_write_cache(sdkp->disk->queue, true, false);
-and handle empty REQ_FLUSH requests in its prep_fn/request_fn. Note that
+and handle empty REQ_OP_FLUSH requests in its prep_fn/request_fn. Note that
REQ_FLUSH requests with a payload are automatically turned into a sequence
-of an empty REQ_FLUSH request followed by the actual write by the block
+of an empty REQ_OP_FLUSH request followed by the actual write by the block
layer. For devices that also support the FUA bit the block layer needs
to be told to pass through the REQ_FUA bit using:
blk_queue_write_cache(sdkp->disk->queue, true, true);
and the driver must handle write requests that have the REQ_FUA bit set
in prep_fn/request_fn. If the FUA bit is not natively supported the block
-layer turns it into an empty REQ_FLUSH request after the actual write.
+layer turns it into an empty REQ_OP_FLUSH request after the actual write.
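To make the contract above concrete, here is a minimal sketch of a legacy request_fn after this conversion. It is illustrative only and not part of the patch: mydev_cache_flush() and mydev_transfer() are hypothetical driver helpers; blk_fetch_request(), req_op() and __blk_end_request_all() are the era's real block-layer API.

	#include <linux/blkdev.h>

	static void mydev_request_fn(struct request_queue *q)
	{
		struct request *req;

		while ((req = blk_fetch_request(q)) != NULL) {
			int err;

			if (req_op(req) == REQ_OP_FLUSH) {
				/* empty flush request: no data, just drain the cache */
				err = mydev_cache_flush(q->queuedata);
			} else {
				/* REQ_FUA is still a flag; force the write to media */
				bool fua = req->cmd_flags & REQ_FUA;

				err = mydev_transfer(q->queuedata, req, fua);
			}
			__blk_end_request_all(req, err);
		}
	}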
req = dev->request;
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
io_req = kmalloc(sizeof(struct io_thread_req),
GFP_ATOMIC);
if (io_req == NULL) {
* The actual execution of flush is double buffered. Whenever a request
* needs to execute PRE or POSTFLUSH, it queues at
* fq->flush_queue[fq->flush_pending_idx]. Once certain criteria are met, a
- * flush is issued and the pending_idx is toggled. When the flush
+ * REQ_OP_FLUSH is issued and the pending_idx is toggled. When the flush
* completes, all the requests that were pending proceed to the next
* step. This allows arbitrary merging of different types of FLUSH/FUA
* requests.
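The double buffering described in this comment reduces to a small toggle, sketched here as standalone userspace C (the names mirror fq->flush_queue[] and fq->flush_pending_idx from the comment; the real blk-flush.c additionally tracks per-request PRE/POSTFLUSH sequence state on top of this):

	#include <stdio.h>

	#define MAX_PENDING 16

	struct flush_queue {
		int pending[2][MAX_PENDING];	/* two pending lists */
		int count[2];
		int pending_idx;	/* list new requests queue on */
		int running_idx;	/* list the in-flight flush covers */
	};

	static void queue_flush_req(struct flush_queue *fq, int req_id)
	{
		int i = fq->pending_idx;

		fq->pending[i][fq->count[i]++] = req_id;
	}

	static void issue_flush(struct flush_queue *fq)
	{
		/* the pending list becomes the running list; toggle the index */
		fq->running_idx = fq->pending_idx;
		fq->pending_idx ^= 1;
		printf("flush issued covering %d request(s)\n",
		       fq->count[fq->running_idx]);
	}

	static void flush_complete(struct flush_queue *fq)
	{
		int i = fq->running_idx, n;

		for (n = 0; n < fq->count[i]; n++)
			printf("req %d proceeds to the next step\n",
			       fq->pending[i][n]);
		fq->count[i] = 0;
	}

	int main(void)
	{
		struct flush_queue fq = { .pending_idx = 0 };

		queue_flush_req(&fq, 1);
		queue_flush_req(&fq, 2);
		issue_flush(&fq);		/* requests 1 and 2 ride this flush */
		queue_flush_req(&fq, 3);	/* arrives while it is in flight */
		flush_complete(&fq);		/* 1 and 2 proceed; 3 waits for the next flush */
		return 0;
	}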
}
flush_rq->cmd_type = REQ_TYPE_FS;
- flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ req_set_op_attrs(flush_rq, REQ_OP_FLUSH, WRITE_FLUSH | REQ_FLUSH_SEQ);
flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
if (op_is_write(req_op(rq))) {
- if (rq->cmd_flags & REQ_FLUSH)
+ if (req_op(rq) == REQ_OP_FLUSH)
ret = lo_req_flush(lo, rq);
else if (req_op(rq) == REQ_OP_DISCARD)
ret = lo_discard(lo, rq, pos);
if (lo->lo_state != Lo_bound)
return -EIO;
- if (lo->use_dio && (!(cmd->rq->cmd_flags & REQ_FLUSH) ||
+ if (lo->use_dio && (req_op(cmd->rq) != REQ_OP_FLUSH ||
req_op(cmd->rq) == REQ_OP_DISCARD))
cmd->use_aio = true;
else
type = NBD_CMD_DISC;
else if (req_op(req) == REQ_OP_DISCARD)
type = NBD_CMD_TRIM;
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
type = NBD_CMD_FLUSH;
else if (rq_data_dir(req) == WRITE)
type = NBD_CMD_WRITE;
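The nbd hunk above shows where the series is headed: with the operation held as a single enumerated value, chains of flag tests collapse into a switch on req_op(). A hypothetical helper in that style (nbd_cmd_type() is not in this patch; the NBD_CMD_* constants are the ones used in the hunk):

	static u32 nbd_cmd_type(struct request *req)
	{
		switch (req_op(req)) {
		case REQ_OP_DISCARD:
			return NBD_CMD_TRIM;
		case REQ_OP_FLUSH:
			return NBD_CMD_FLUSH;
		case REQ_OP_WRITE:
			return NBD_CMD_WRITE;
		default:
			return NBD_CMD_READ;
		}
	}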
* driver-specific, etc.
*/
- do_flush = rq->cmd_flags & REQ_FLUSH;
+ do_flush = (req_op(rq) == REQ_OP_FLUSH);
do_write = (rq_data_dir(rq) == WRITE);
if (!do_flush) { /* osd_flush does not use a bio */
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
while ((req = blk_fetch_request(q))) {
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
if (ps3disk_submit_flush_request(dev, req))
break;
} else if (req->cmd_type == REQ_TYPE_FS) {
return IRQ_HANDLED;
}
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
read = 0;
op = "flush";
} else {
data_dir = rq_data_dir(req);
io_flags = req->cmd_flags;
- if (io_flags & REQ_FLUSH)
+ if (req_op(req) == REQ_OP_FLUSH)
flush++;
if (io_flags & REQ_FUA)
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
vbr->req = req;
- if (req->cmd_flags & REQ_FLUSH) {
+ if (req_op(req) == REQ_OP_FLUSH) {
vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_FLUSH);
vbr->out_hdr.sector = 0;
vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req));
* The indirect operation can only be a BLKIF_OP_READ or
* BLKIF_OP_WRITE
*/
- BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
+ BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
ring_req->operation = BLKIF_OP_INDIRECT;
ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
- if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
/*
* Ideally we can do an unordered flush-to-disk.
* In case the backend only supports barriers, use that.
struct blkfront_info *info)
{
return ((req->cmd_type != REQ_TYPE_FS) ||
- ((req->cmd_flags & REQ_FLUSH) &&
+ ((req_op(req) == REQ_OP_FLUSH) &&
!(info->feature_flush & REQ_FLUSH)) ||
((req->cmd_flags & REQ_FUA) &&
!(info->feature_flush & REQ_FUA)));
/*
* Get the bios in the request so we can re-queue them.
*/
- if (copy[i].request->cmd_flags & REQ_FLUSH ||
+ if (req_op(copy[i].request) == REQ_OP_FLUSH ||
req_op(copy[i].request) == REQ_OP_DISCARD ||
copy[i].request->cmd_flags & (REQ_FUA | REQ_SECURE)) {
/*
ide_drive_t *drive = q->queuedata;
struct ide_cmd *cmd;
- if (!(rq->cmd_flags & REQ_FLUSH))
+ if (req_op(rq) != REQ_OP_FLUSH)
return BLKPREP_OK;
if (rq->special) {
/* always use block 0 to find the target for flushes for now */
pos = 0;
- if (!(rq->cmd_flags & REQ_FLUSH))
+ if (req_op(rq) != REQ_OP_FLUSH)
pos = blk_rq_pos(rq);
if ((dm_request_peeked_before_merge_deadline(md) &&
!IS_ALIGNED(blk_rq_sectors(next), 8))
break;
- if (req_op(next) == REQ_OP_DISCARD || next->cmd_flags & REQ_FLUSH)
+ if (req_op(next) == REQ_OP_DISCARD ||
+ req_op(next) == REQ_OP_FLUSH)
break;
if (rq_data_dir(cur) != rq_data_dir(next))
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
- unsigned int cmd_flags = req ? req->cmd_flags : 0;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (cmd_flags & REQ_FLUSH) {
+ } else if (req && req_op(req) == REQ_OP_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
static inline bool mmc_req_is_special(struct request *req)
{
- return req && (req->cmd_flags & REQ_FLUSH || req_op(req) == REQ_OP_DISCARD);
+ return req &&
+ (req_op(req) == REQ_OP_FLUSH || req_op(req) == REQ_OP_DISCARD);
}
struct request;
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
- if (req->cmd_flags & REQ_FLUSH)
+ if (req_op(req) == REQ_OP_FLUSH)
return tr->flush(dev);
if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
if (req->cmd_type == REQ_TYPE_DRV_PRIV)
memcpy(cmd, req->cmd, sizeof(*cmd));
- else if (req->cmd_flags & REQ_FLUSH)
+ else if (req_op(req) == REQ_OP_FLUSH)
nvme_setup_flush(ns, cmd);
else if (req_op(req) == REQ_OP_DISCARD)
ret = nvme_setup_discard(ns, req, cmd);
return sd_setup_discard_cmnd(cmd);
case REQ_OP_WRITE_SAME:
return sd_setup_write_same_cmnd(cmd);
+ case REQ_OP_FLUSH:
+ return sd_setup_flush_cmnd(cmd);
case REQ_OP_READ:
case REQ_OP_WRITE:
- if (rq->cmd_flags & REQ_FLUSH)
- return sd_setup_flush_cmnd(cmd);
- else
- return sd_setup_read_write_cmnd(cmd);
+ return sd_setup_read_write_cmnd(cmd);
default:
BUG();
}
REQ_OP_WRITE,
REQ_OP_DISCARD, /* request to discard sectors */
REQ_OP_WRITE_SAME, /* write same block many times */
+ REQ_OP_FLUSH, /* request for cache flush */
};
-#define REQ_OP_BITS 2
+#define REQ_OP_BITS 3
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
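The REQ_OP_BITS bump is simple arithmetic: REQ_OP_FLUSH is the fifth operation (values 0 through 4), which no longer fits in two bits. A standalone compile-time check of that reasoning (REQ_OP_NR is added purely for illustration and is not in the patch):

	enum req_op {
		REQ_OP_READ,
		REQ_OP_WRITE,
		REQ_OP_DISCARD,
		REQ_OP_WRITE_SAME,
		REQ_OP_FLUSH,
		REQ_OP_NR,	/* number of operations: 5 */
	};

	#define REQ_OP_BITS 3	/* 1 << 2 == 4 is too small; 1 << 3 == 8 fits */

	_Static_assert(REQ_OP_NR <= (1 << REQ_OP_BITS),
		       "request op field is too narrow");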
if (rq->cmd_type != REQ_TYPE_FS)
return false;
+ if (req_op(rq) == REQ_OP_FLUSH)
+ return false;
+
if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
return false;
what |= MASK_TC_BIT(op_flags, FUA);
if (op == REQ_OP_DISCARD)
what |= BLK_TC_ACT(BLK_TC_DISCARD);
+ if (op == REQ_OP_FLUSH)
+ what |= BLK_TC_ACT(BLK_TC_FLUSH);
pid = tsk->pid;
if (act_log_check(bt, what, sector, pid))
case REQ_OP_DISCARD:
rwbs[i++] = 'D';
break;
+ case REQ_OP_FLUSH:
+ rwbs[i++] = 'F';
+ break;
case REQ_OP_READ:
rwbs[i++] = 'R';
break;