}
static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int rw)
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
+	       int op_flags)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;
-	bio->bi_rw = rw;
+	bio_set_op_attrs(bio, op, op_flags);
	return bio;
}
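
The signature change above splits the old catch-all rw argument into a REQ_OP_* operation plus separate modifier flags, and bio_set_op_attrs() records both on the bio in place of the direct bi_rw assignment. A minimal sketch of the calling convention before and after (illustrative only, reusing the names from the hunks in this patch):

/* Old convention: one value carried both the direction and any modifiers. */
bio = iblock_get_bio(cmd, block_lba, sgl_nents, WRITE_FUA);

/* New convention: the operation and its modifier flags travel separately. */
bio = iblock_get_bio(cmd, block_lba, sgl_nents, REQ_OP_WRITE, WRITE_FUA);
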
		goto fail;
	cmd->priv = ibr;
-	bio = iblock_get_bio(cmd, block_lba, 1, WRITE);
+	bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE, 0);
	if (!bio)
		goto fail_free_ibr;
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
-			bio = iblock_get_bio(cmd, block_lba, 1, WRITE);
+			bio = iblock_get_bio(cmd, block_lba, 1, REQ_OP_WRITE,
+					     0);
			if (!bio)
				goto fail_put_bios;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	unsigned bio_cnt;
-	int rw = 0;
-	int i;
+	int i, op, op_flags = 0;
	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
+		op = REQ_OP_WRITE;
		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
			if (cmd->se_cmd_flags & SCF_FUA)
-				rw = WRITE_FUA;
+				op_flags = WRITE_FUA;
			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-				rw = WRITE_FUA;
-			else
-				rw = WRITE;
-		} else {
-			rw = WRITE;
+				op_flags = WRITE_FUA;
		}
	} else {
-		rw = READ;
+		op = REQ_OP_READ;
	}
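
The write-path selection just above only ever adjusts the modifier flags: the operation is always REQ_OP_WRITE, and WRITE_FUA is added when the device advertises FUA support and either the initiator set the Force Unit Access bit or no volatile write cache is present. A hypothetical helper (not part of this patch) restating that decision:

/*
 * Hypothetical helper, not in the patch: returns the op_flags to pair with
 * REQ_OP_WRITE, mirroring the selection above.
 */
static int iblock_write_op_flags(struct se_cmd *cmd, struct request_queue *q)
{
	/* FUA is only meaningful if the device honours it ... */
	if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
		/* ... and is used when the initiator asked for it ... */
		if (cmd->se_cmd_flags & SCF_FUA)
			return WRITE_FUA;
		/* ... or when there is no volatile write cache to rely on. */
		if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
			return WRITE_FUA;
	}
	return 0;	/* plain write, no modifier flags */
}
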
	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
		return 0;
	}
-	bio = iblock_get_bio(cmd, block_lba, sgl_nents, rw);
+	bio = iblock_get_bio(cmd, block_lba, sgl_nents, op, op_flags);
	if (!bio)
		goto fail_free_ibr;
				bio_cnt = 0;
			}
-			bio = iblock_get_bio(cmd, block_lba, sg_num, rw);
+			bio = iblock_get_bio(cmd, block_lba, sg_num, op,
+					     op_flags);
			if (!bio)
				goto fail_put_bios;
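
After the conversion, code that needs to know what kind of I/O a bio describes reads the operation back with the matching accessor instead of testing bits in bi_rw; a sketch, not part of this patch:

/* Sketch: bio_op() pairs with the bio_set_op_attrs() call used above. */
if (bio_op(bio) == REQ_OP_WRITE)
	pr_debug("write bio for lba %llu\n",
		 (unsigned long long)bio->bi_iter.bi_sector);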