block,fs: use REQ_* flags directly
author: Christoph Hellwig <hch@lst.de>
Tue, 1 Nov 2016 13:40:10 +0000 (07:40 -0600)
committer: Jens Axboe <axboe@fb.com>
Tue, 1 Nov 2016 15:43:26 +0000 (09:43 -0600)
Remove the WRITE_* and READ_SYNC wrappers, and just use the flags
directly.  Where applicable this also drops usage of the
bio_set_op_attrs wrapper.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
53 files changed:
block/blk-flush.c
drivers/block/drbd/drbd_receiver.c
drivers/block/xen-blkback/blkback.c
drivers/md/bcache/btree.c
drivers/md/bcache/debug.c
drivers/md/bcache/request.c
drivers/md/bcache/super.c
drivers/md/dm-bufio.c
drivers/md/dm-log.c
drivers/md/dm-raid1.c
drivers/md/dm-snap-persistent.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/nvme/target/io-cmd.c
drivers/target/target_core_iblock.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/buffer.c
fs/direct-io.c
fs/ext4/mmp.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/gfs2/log.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/hfsplus/super.c
fs/jbd2/checkpoint.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/jfs/jfs_logmgr.c
fs/mpage.c
fs/nilfs2/super.c
fs/ocfs2/cluster/heartbeat.c
fs/reiserfs/journal.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_buf.c
include/linux/fs.h
include/trace/events/f2fs.h
kernel/power/swap.c

index 95f1d4d357df8efa45c928798c65f80d93def0f6..d35beca18481671431283f66ce069f3df7083ef6 100644 (file)
@@ -330,7 +330,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq)
        }
 
        flush_rq->cmd_type = REQ_TYPE_FS;
-       flush_rq->cmd_flags = REQ_OP_FLUSH | WRITE_FLUSH;
+       flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->rq_flags |= RQF_FLUSH_SEQ;
        flush_rq->rq_disk = first_rq->rq_disk;
        flush_rq->end_io = flush_end_io;
@@ -486,7 +486,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 
        bio = bio_alloc(gfp_mask, 0);
        bio->bi_bdev = bdev;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
        ret = submit_bio_wait(bio);
 
index 942384f34e2284f699213e017349043a74e5f85d..a89538cb3eaabbd89ffb166acf504b5d96dd97ac 100644 (file)
@@ -1266,7 +1266,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
        bio->bi_bdev = device->ldev->backing_bdev;
        bio->bi_private = octx;
        bio->bi_end_io = one_flush_endio;
-       bio_set_op_attrs(bio, REQ_OP_FLUSH, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
 
        device->flush_jif = jiffies;
        set_bit(FLUSH_PENDING, &device->flags);
index 4a80ee752597f02adfc8096e5d14ce9a27e03f0e..726c32e35db9c542e6f050ff0a04e31e10fc2b7d 100644 (file)
@@ -1253,14 +1253,14 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        case BLKIF_OP_WRITE:
                ring->st_wr_req++;
                operation = REQ_OP_WRITE;
-               operation_flags = WRITE_ODIRECT;
+               operation_flags = REQ_SYNC | REQ_IDLE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
        case BLKIF_OP_FLUSH_DISKCACHE:
                ring->st_f_req++;
                operation = REQ_OP_WRITE;
-               operation_flags = WRITE_FLUSH;
+               operation_flags = REQ_PREFLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
@@ -1272,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;
 
-       if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
+       if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1334,7 +1334,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
        }
 
        /* Wait on all outstanding I/O's and once that has been completed
-        * issue the WRITE_FLUSH.
+        * issue the flush.
         */
        if (drain)
                xen_blk_drain_io(pending_req->ring);
@@ -1380,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
 
        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
-               BUG_ON(operation_flags != WRITE_FLUSH);
+               BUG_ON(operation_flags != REQ_PREFLUSH);
 
                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
index 81d3db40cd7be634ca7697a928654a89650ef804..6fdd8e252760cbc11ff8cceb1c38fb85eccbcbad 100644 (file)
@@ -297,7 +297,7 @@ static void bch_btree_node_read(struct btree *b)
        bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
        bio->bi_end_io  = btree_node_read_endio;
        bio->bi_private = &cl;
-       bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+       bio->bi_opf = REQ_OP_READ | REQ_META;
 
        bch_bio_map(bio, b->keys.set[0].data);
 
@@ -393,7 +393,7 @@ static void do_btree_node_write(struct btree *b)
        b->bio->bi_end_io       = btree_node_write_endio;
        b->bio->bi_private      = cl;
        b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
-       bio_set_op_attrs(b->bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);
+       b->bio->bi_opf          = REQ_OP_WRITE | REQ_META | REQ_FUA;
        bch_bio_map(b->bio, i);
 
        /*
index 333a1e5f6ae66c1153851c666b923e28e80758d9..1c9130ae00736c7ed987c997c943962faefe9ee6 100644 (file)
@@ -52,7 +52,7 @@ void bch_btree_verify(struct btree *b)
        bio->bi_bdev            = PTR_CACHE(b->c, &b->key, 0)->bdev;
        bio->bi_iter.bi_sector  = PTR_OFFSET(&b->key, 0);
        bio->bi_iter.bi_size    = KEY_SIZE(&v->key) << 9;
-       bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);
+       bio->bi_opf             = REQ_OP_READ | REQ_META;
        bch_bio_map(bio, sorted);
 
        submit_bio_wait(bio);
@@ -113,7 +113,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
        check = bio_clone(bio, GFP_NOIO);
        if (!check)
                return;
-       bio_set_op_attrs(check, REQ_OP_READ, READ_SYNC);
+       check->bi_opf = REQ_OP_READ;
 
        if (bio_alloc_pages(check, GFP_NOIO))
                goto out_put;
index e8a2b693c9288351c0d0b2d4c2809c4afecf1e7a..0d99b5f4b3e61334c745e967fd6441282c7136bf 100644 (file)
@@ -923,7 +923,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                        flush->bi_bdev  = bio->bi_bdev;
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
-                       bio_set_op_attrs(flush, REQ_OP_WRITE, WRITE_FLUSH);
+                       flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
                        closure_bio_submit(flush, cl);
                }
index 849ad441cd76ba6b04ba2fa2aaac51471acdfd67..988edf92846603875534180091ae7f9d4473366b 100644 (file)
@@ -381,7 +381,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
                return "bad uuid pointer";
 
        bkey_copy(&c->uuid_bucket, k);
-       uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
+       uuid_io(c, REQ_OP_READ, 0, k, cl);
 
        if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                struct uuid_entry_v0    *u0 = (void *) c->uuids;
@@ -600,7 +600,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
                        ca->prio_last_buckets[bucket_nr] = bucket;
                        bucket_nr++;
 
-                       prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
+                       prio_io(ca, bucket, REQ_OP_READ, 0);
 
                        if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
                                pr_warn("bad csum reading priorities");
index 125aedc3875f4ff69c53c23807922a825c9a4121..b3ba142e59a4cd2c0a8bbbbb70f3095034218092 100644 (file)
@@ -1316,7 +1316,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c)
 {
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = WRITE_FLUSH,
+               .bi_op_flags = REQ_PREFLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = c->dm_io,
index 07fc1ad42ec57c4c835c43990187410036e5e5bf..33e71ea6cc1435bf13b7dc88546e30c83ca89083 100644 (file)
@@ -308,7 +308,7 @@ static int flush_header(struct log_c *lc)
        };
 
        lc->io_req.bi_op = REQ_OP_WRITE;
-       lc->io_req.bi_op_flags = WRITE_FLUSH;
+       lc->io_req.bi_op_flags = REQ_PREFLUSH;
 
        return dm_io(&lc->io_req, 1, &null_location, NULL);
 }
index bdf1606f67bcfbfcfadcfdc65f2c559c68fff5c9..1a176d7c8b905d112470c7e0523be118d879598b 100644 (file)
@@ -261,7 +261,7 @@ static int mirror_flush(struct dm_target *ti)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = WRITE_FLUSH,
+               .bi_op_flags = REQ_PREFLUSH,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
@@ -657,7 +657,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
-               .bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
+               .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = write_callback,
index b8cf956b577b4a2f235f4910b15ab2ef1942dd9b..b93476c3ba3f9767fb133fed977e7a888cc0698e 100644 (file)
@@ -741,7 +741,7 @@ static void persistent_commit_exception(struct dm_exception_store *store,
        /*
         * Commit exceptions to disk.
         */
-       if (ps->valid && area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA))
+       if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA))
                ps->valid = 0;
 
        /*
@@ -818,7 +818,7 @@ static int persistent_commit_merge(struct dm_exception_store *store,
        for (i = 0; i < nr_merged; i++)
                clear_exception(ps, ps->current_committed - 1 - i);
 
-       r = area_io(ps, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+       r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
        if (r < 0)
                return r;
 
index 147af9536d0c10d4054f42306383e6fe118a6ce4..b2abfa41af3ef3da9d5e6f84f6a11a9f6ef1282e 100644 (file)
@@ -1527,7 +1527,7 @@ static struct mapped_device *alloc_dev(int minor)
 
        bio_init(&md->flush_bio);
        md->flush_bio.bi_bdev = md->bdev;
-       bio_set_op_attrs(&md->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+       md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
        dm_stats_init(&md->stats);
 
index eac84d8ff7244b659bef2ccea8c8f4ada8b7bc2f..b69ec7da4baeef19dbbf18ce1aa2dd6d36bb0c20 100644 (file)
@@ -394,7 +394,7 @@ static void submit_flushes(struct work_struct *ws)
                        bi->bi_end_io = md_end_flush;
                        bi->bi_private = rdev;
                        bi->bi_bdev = rdev->bdev;
-                       bio_set_op_attrs(bi, REQ_OP_WRITE, WRITE_FLUSH);
+                       bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                        atomic_inc(&mddev->flush_pending);
                        submit_bio(bi);
                        rcu_read_lock();
@@ -743,7 +743,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
        bio_add_page(bio, page, size, 0);
        bio->bi_private = rdev;
        bio->bi_end_io = super_written;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA;
 
        atomic_inc(&mddev->pending_writes);
        submit_bio(bio);
index 1b1ab4a1d132b39f0145b8bc3305484fad5a7091..28d015c6fffe1e205385bb7e4adb7c052e2a6a4d 100644 (file)
@@ -685,7 +685,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
        bio_reset(&log->flush_bio);
        log->flush_bio.bi_bdev = log->rdev->bdev;
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
-       bio_set_op_attrs(&log->flush_bio, REQ_OP_WRITE, WRITE_FLUSH);
+       log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        submit_bio(&log->flush_bio);
 }
 
@@ -1053,7 +1053,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
        mb->checksum = cpu_to_le32(crc);
 
        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE,
-                         WRITE_FUA, false)) {
+                         REQ_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
index 92ac251e91e62e94043ef35a78b911a66c00e741..70acdd379e4490d3107242f8502cf4d704400963 100644 (file)
@@ -913,7 +913,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
                        op = REQ_OP_WRITE;
                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
-                               op_flags = WRITE_FUA;
+                               op_flags = REQ_FUA;
                        if (test_bit(R5_Discard, &sh->dev[i].flags))
                                op = REQ_OP_DISCARD;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
index 4a96c2049b7b6be310ea0e818fa29bb39db108d0..c2784cfc5e298b3d15d9351c17d5af8e058ff6db 100644 (file)
@@ -58,7 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
 
        if (req->cmd->rw.opcode == nvme_cmd_write) {
                op = REQ_OP_WRITE;
-               op_flags = WRITE_ODIRECT;
+               op_flags = REQ_SYNC | REQ_IDLE;
                if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
                        op_flags |= REQ_FUA;
        } else {
@@ -109,7 +109,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
        bio->bi_bdev = req->ns->bdev;
        bio->bi_private = req;
        bio->bi_end_io = nvmet_bio_done;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
 
        submit_bio(bio);
 }
index 372d744315f38e971da84d1f4eca26a66666a158..d316ed537d59132a3fde2dfd11eba3194d899bec 100644 (file)
@@ -388,7 +388,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
        bio = bio_alloc(GFP_KERNEL, 0);
        bio->bi_end_io = iblock_end_io_flush;
        bio->bi_bdev = ib_dev->ibd_bd;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        if (!immed)
                bio->bi_private = cmd;
        submit_bio(bio);
@@ -686,15 +686,15 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
                struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
                /*
-                * Force writethrough using WRITE_FUA if a volatile write cache
+                * Force writethrough using REQ_FUA if a volatile write cache
                 * is not enabled, or if initiator set the Force Unit Access bit.
                 */
                op = REQ_OP_WRITE;
                if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
                        if (cmd->se_cmd_flags & SCF_FUA)
-                               op_flags = WRITE_FUA;
+                               op_flags = REQ_FUA;
                        else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
-                               op_flags = WRITE_FUA;
+                               op_flags = REQ_FUA;
                }
        } else {
                op = REQ_OP_READ;
index c8454a8e35f2e30329ddd4d5845bdedf39237dc9..fe10afd51e027f8345d9d98da489a5e42a6c6523 100644 (file)
@@ -3485,9 +3485,9 @@ static int write_dev_supers(struct btrfs_device *device,
                 * to go down lazy.
                 */
                if (i == 0)
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
+                       ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_FUA, bh);
                else
-                       ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+                       ret = btrfsic_submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
                if (ret)
                        errors++;
        }
@@ -3551,7 +3551,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 
        bio->bi_end_io = btrfs_end_empty_barrier;
        bio->bi_bdev = device->bdev;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+       bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        init_completion(&device->flush_wait);
        bio->bi_private = &device->flush_wait;
        device->flush_bio = bio;
index 66a755150056ed06af2c3bc636b4dc76f45adce5..ff87bff7bdb65618aa57eb7eb2e79451f74dcb49 100644 (file)
@@ -127,7 +127,7 @@ struct extent_page_data {
         */
        unsigned int extent_locked:1;
 
-       /* tells the submit_bio code to use a WRITE_SYNC */
+       /* tells the submit_bio code to use REQ_SYNC */
        unsigned int sync_io:1;
 };
 
@@ -2047,7 +2047,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
                return -EIO;
        }
        bio->bi_bdev = dev->bdev;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        bio_add_page(bio, page, length, pg_offset);
 
        if (btrfsic_submit_bio_wait(bio)) {
@@ -2388,7 +2388,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
        struct inode *inode = page->mapping->host;
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
        struct bio *bio;
-       int read_mode;
+       int read_mode = 0;
        int ret;
 
        BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -2404,9 +2404,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
        }
 
        if (failed_bio->bi_vcnt > 1)
-               read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-       else
-               read_mode = READ_SYNC;
+               read_mode |= REQ_FAILFAST_DEV;
 
        phy_offset >>= inode->i_sb->s_blocksize_bits;
        bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
@@ -3484,7 +3482,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        unsigned long nr_written = 0;
 
        if (wbc->sync_mode == WB_SYNC_ALL)
-               write_flags = WRITE_SYNC;
+               write_flags = REQ_SYNC;
 
        trace___extent_writepage(page, inode, wbc);
 
@@ -3729,7 +3727,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
        unsigned long i, num_pages;
        unsigned long bio_flags = 0;
        unsigned long start, end;
-       int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
+       int write_flags = (epd->sync_io ? REQ_SYNC : 0) | REQ_META;
        int ret = 0;
 
        clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -4076,7 +4074,7 @@ static void flush_epd_write_bio(struct extent_page_data *epd)
                int ret;
 
                bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
-                                epd->sync_io ? WRITE_SYNC : 0);
+                                epd->sync_io ? REQ_SYNC : 0);
 
                ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
                BUG_ON(ret < 0); /* -ENOMEM */
index 9a377079af267fb72bb538421b25d263539bc1ce..c8eb82a416b334ecb4ea30128bf6799570397d41 100644 (file)
@@ -7917,7 +7917,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
        struct io_failure_record *failrec;
        struct bio *bio;
        int isector;
-       int read_mode;
+       int read_mode = 0;
        int ret;
 
        BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
@@ -7936,9 +7936,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
        if ((failed_bio->bi_vcnt > 1)
                || (failed_bio->bi_io_vec->bv_len
                        > BTRFS_I(inode)->root->sectorsize))
-               read_mode = READ_SYNC | REQ_FAILFAST_DEV;
-       else
-               read_mode = READ_SYNC;
+               read_mode |= REQ_FAILFAST_DEV;
 
        isector = start - btrfs_io_bio(failed_bio)->logical;
        isector >>= inode->i_sb->s_blocksize_bits;
index fffb9ab8526eb43f662ba0e943112742798c841d..ff3078234d94d06f722452a15e34a06577fbf1c5 100644 (file)
@@ -4440,7 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
        bio->bi_iter.bi_size = 0;
        bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
        bio->bi_bdev = dev->bdev;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
        ret = bio_add_page(bio, page, PAGE_SIZE, 0);
        if (ret != PAGE_SIZE) {
 leave_with_eio:
index deda46cf1292f683f1f94d48ac4f9d85b6140a7c..0d7d635d8bfbea68f1053d1d1830a3a8908c299e 100644 (file)
@@ -6023,7 +6023,7 @@ static void btrfs_end_bio(struct bio *bio)
                                else
                                        btrfs_dev_stat_inc(dev,
                                                BTRFS_DEV_STAT_READ_ERRS);
-                               if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
+                               if (bio->bi_opf & REQ_PREFLUSH)
                                        btrfs_dev_stat_inc(dev,
                                                BTRFS_DEV_STAT_FLUSH_ERRS);
                                btrfs_dev_stat_print_on_error(dev);
index 09ed29c67848c4a2f06781a75d7e6794e59c1a4d..f137ffe6654c5df2220f6c69f8d4f26b4d3fff99 100644 (file)
@@ -62,7 +62,7 @@ struct btrfs_device {
        int running_pending;
        /* regular prio bios */
        struct btrfs_pending_bios pending_bios;
-       /* WRITE_SYNC bios */
+       /* sync bios */
        struct btrfs_pending_bios pending_sync_bios;
 
        struct block_device *bdev;
index a29335867e30380eb60d44436a009b23f02f4b08..bc7c2bb30a9bfa0f9accff7cfa8142b3aa07af24 100644 (file)
@@ -753,7 +753,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * still in flight on potentially older
                                 * contents.
                                 */
-                               write_dirty_buffer(bh, WRITE_SYNC);
+                               write_dirty_buffer(bh, REQ_SYNC);
 
                                /*
                                 * Kick off IO for the previous mapping. Note
@@ -1684,7 +1684,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * prevents this contention from occurring.
  *
  * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
 int __block_write_full_page(struct inode *inode, struct page *page,
@@ -1697,7 +1697,7 @@ int __block_write_full_page(struct inode *inode, struct page *page,
        struct buffer_head *bh, *head;
        unsigned int blocksize, bbits;
        int nr_underway = 0;
-       int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+       int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 
        head = create_page_buffers(page, inode,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -3210,7 +3210,7 @@ EXPORT_SYMBOL(__sync_dirty_buffer);
 
 int sync_dirty_buffer(struct buffer_head *bh)
 {
-       return __sync_dirty_buffer(bh, WRITE_SYNC);
+       return __sync_dirty_buffer(bh, REQ_SYNC);
 }
 EXPORT_SYMBOL(sync_dirty_buffer);
 
index fb9aa16a77272818e39294b21506927076da27d7..a5138c564019a4ca863b0540f7503f0a55ecd27e 100644 (file)
@@ -1209,7 +1209,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
        dio->inode = inode;
        if (iov_iter_rw(iter) == WRITE) {
                dio->op = REQ_OP_WRITE;
-               dio->op_flags = WRITE_ODIRECT;
+               dio->op_flags = REQ_SYNC | REQ_IDLE;
        } else {
                dio->op = REQ_OP_READ;
        }
index d89754ef1aab72075c378fc64300259e73a12ccc..eb98356386808bfc4880f29ac89daefdc11bbbfe 100644 (file)
@@ -35,7 +35,7 @@ static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
 }
 
 /*
- * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * Write the MMP block using REQ_SYNC to try to get the block on-disk
  * faster.
  */
 static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
        lock_buffer(bh);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
-       submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+       submit_bh(REQ_OP_WRITE, REQ_SYNC | REQ_META | REQ_PRIO, bh);
        wait_on_buffer(bh);
        sb_end_write(sb);
        if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
        get_bh(*bh);
        lock_buffer(*bh);
        (*bh)->b_end_io = end_buffer_read_sync;
-       submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
+       submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, *bh);
        wait_on_buffer(*bh);
        if (!buffer_uptodate(*bh)) {
                ret = -EIO;
index 0094923e5ebf56a6a65198cf5a7040078b73240a..e0b3b54cdef32651d32685bc6cfe56ae23602dcf 100644 (file)
@@ -340,7 +340,7 @@ void ext4_io_submit(struct ext4_io_submit *io)
 
        if (bio) {
                int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
-                                 WRITE_SYNC : 0;
+                                 REQ_SYNC : 0;
                bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
                submit_bio(io->io_bio);
        }
index 6db81fbcbaa6cce558b6ef9f7e613e8284e898a6..f31eb286af90a5b15c65ec8c34598435315dde54 100644 (file)
@@ -4553,7 +4553,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        unlock_buffer(sbh);
        if (sync) {
                error = __sync_dirty_buffer(sbh,
-                       test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
+                       test_opt(sb, BARRIER) ? REQ_FUA : REQ_SYNC);
                if (error)
                        return error;
 
index 7e9b504bd8b295787b22c8c933747894e42451d2..d935c06a84f038eab9284ed1f54a95bf6bace532 100644 (file)
@@ -65,7 +65,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
                .sbi = sbi,
                .type = META,
                .op = REQ_OP_READ,
-               .op_flags = READ_SYNC | REQ_META | REQ_PRIO,
+               .op_flags = REQ_META | REQ_PRIO,
                .old_blkaddr = index,
                .new_blkaddr = index,
                .encrypted_page = NULL,
@@ -160,7 +160,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
                .sbi = sbi,
                .type = META,
                .op = REQ_OP_READ,
-               .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
+               .op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
                .encrypted_page = NULL,
        };
        struct blk_plug plug;
index 9ae194fd2fdb81cbc54bfed9220da527d6b21a89..b80bf10603d74821588ca5122e64c206036dc1a9 100644 (file)
@@ -198,11 +198,9 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
                io->fio.op = REQ_OP_WRITE;
-               if (test_opt(sbi, NOBARRIER))
-                       io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
-               else
-                       io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
-                                                               REQ_PRIO;
+               io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO;
+               if (!test_opt(sbi, NOBARRIER))
+                       io->fio.op_flags |= REQ_FUA;
        }
        __submit_merged_bio(io);
 out:
@@ -483,7 +481,7 @@ struct page *find_data_page(struct inode *inode, pgoff_t index)
                return page;
        f2fs_put_page(page, 0);
 
-       page = get_read_data_page(inode, index, READ_SYNC, false);
+       page = get_read_data_page(inode, index, 0, false);
        if (IS_ERR(page))
                return page;
 
@@ -509,7 +507,7 @@ struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
 repeat:
-       page = get_read_data_page(inode, index, READ_SYNC, for_write);
+       page = get_read_data_page(inode, index, 0, for_write);
        if (IS_ERR(page))
                return page;
 
@@ -1251,7 +1249,7 @@ static int f2fs_write_data_page(struct page *page,
                .sbi = sbi,
                .type = DATA,
                .op = REQ_OP_WRITE,
-               .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+               .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
                .page = page,
                .encrypted_page = NULL,
        };
@@ -1663,7 +1661,7 @@ repeat:
                        err = PTR_ERR(bio);
                        goto fail;
                }
-               bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+               bio->bi_opf = REQ_OP_READ;
                if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                        bio_put(bio);
                        err = -EFAULT;
index 93985c64d8a8bef1328ddd5f35cc7d047f41fc3a..9eb11b2244eac8416cd7507a5e717b6f65430da4 100644 (file)
@@ -550,7 +550,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .op = REQ_OP_READ,
-               .op_flags = READ_SYNC,
+               .op_flags = 0,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
@@ -625,7 +625,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
        fio.op = REQ_OP_WRITE;
-       fio.op_flags = WRITE_SYNC;
+       fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_mbio(&fio);
 
@@ -663,7 +663,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
                        .op = REQ_OP_WRITE,
-                       .op_flags = WRITE_SYNC,
+                       .op_flags = REQ_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
index 5f1a67f756afc449283d91c276fbce899fc360cb..2e7f54c191b441bd5bd41e052413a50a9c6cc363 100644 (file)
@@ -111,7 +111,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
                .sbi = F2FS_I_SB(dn->inode),
                .type = DATA,
                .op = REQ_OP_WRITE,
-               .op_flags = WRITE_SYNC | REQ_PRIO,
+               .op_flags = REQ_SYNC | REQ_PRIO,
                .page = page,
                .encrypted_page = NULL,
        };
index 01177ecdeab8e7bb5e0fe8eb34e2ae0e3d6ff434..932f3f8bb57bba26b6e7da11edc491dae18523a7 100644 (file)
@@ -1134,7 +1134,7 @@ repeat:
        if (!page)
                return ERR_PTR(-ENOMEM);
 
-       err = read_node_page(page, READ_SYNC);
+       err = read_node_page(page, 0);
        if (err < 0) {
                f2fs_put_page(page, 1);
                return ERR_PTR(err);
@@ -1570,7 +1570,7 @@ static int f2fs_write_node_page(struct page *page,
                .sbi = sbi,
                .type = NODE,
                .op = REQ_OP_WRITE,
-               .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
+               .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? REQ_SYNC : 0,
                .page = page,
                .encrypted_page = NULL,
        };
index fc886f0084495e84234d49edd10d55422c7cb854..f1b4a1775ebebf0bce781fbdc59bf70ab289b61a 100644 (file)
@@ -259,7 +259,7 @@ static int __commit_inmem_pages(struct inode *inode,
                .sbi = sbi,
                .type = DATA,
                .op = REQ_OP_WRITE,
-               .op_flags = WRITE_SYNC | REQ_PRIO,
+               .op_flags = REQ_SYNC | REQ_PRIO,
                .encrypted_page = NULL,
        };
        bool submit_bio = false;
@@ -420,7 +420,7 @@ repeat:
                fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
 
                bio->bi_bdev = sbi->sb->s_bdev;
-               bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+               bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                ret = submit_bio_wait(bio);
 
                llist_for_each_entry_safe(cmd, next,
@@ -454,7 +454,7 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
 
                atomic_inc(&fcc->submit_flush);
                bio->bi_bdev = sbi->sb->s_bdev;
-               bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+               bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                ret = submit_bio_wait(bio);
                atomic_dec(&fcc->submit_flush);
                bio_put(bio);
@@ -1515,7 +1515,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
                .sbi = sbi,
                .type = META,
                .op = REQ_OP_WRITE,
-               .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
+               .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
                .old_blkaddr = page->index,
                .new_blkaddr = page->index,
                .page = page,
index 6132b4ce4e4ce348eec2c7c33ddce3700cb14b1b..2cac6bb860808dc9f2a42b633674b5379fbf1af8 100644 (file)
@@ -1238,7 +1238,7 @@ static int __f2fs_commit_super(struct buffer_head *bh,
        unlock_buffer(bh);
 
        /* it's rare case, we can do fua all the time */
-       return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
+       return __sync_dirty_buffer(bh, REQ_PREFLUSH | REQ_FUA);
 }
 
 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
index e58ccef09c917baa70542036b171dddeceae9d4b..27c00a16def0a50fc6fe6b40a94184f862196995 100644 (file)
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        struct gfs2_log_header *lh;
        unsigned int tail;
        u32 hash;
-       int op_flags = WRITE_FLUSH_FUA | REQ_META;
+       int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META;
        struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
        enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
        lh = page_address(page);
@@ -682,7 +682,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
                gfs2_ordered_wait(sdp);
                log_flush_wait(sdp);
-               op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
+               op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
        }
 
        sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
index 373639a597823c0b0b58fe129bf3b0118735599a..e562b1191c9c8d5abc6a2b5d3d21bcae8c640c66 100644 (file)
@@ -38,7 +38,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
        struct buffer_head *bh, *head;
        int nr_underway = 0;
        int write_flags = REQ_META | REQ_PRIO |
-               (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
+               (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 
        BUG_ON(!PageLocked(page));
        BUG_ON(!page_has_buffers(page));
@@ -285,7 +285,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                }
        }
 
-       gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+       gfs2_submit_bhs(REQ_OP_READ, REQ_META | REQ_PRIO, bhs, num);
        if (!(flags & DIO_WAIT))
                return 0;
 
@@ -453,7 +453,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
+               ll_rw_block(REQ_OP_READ, REQ_META, 1, &first_bh);
 
        dblock++;
        extlen--;
index ff72ac6439c821d94607d138856b8a16c2ecc893..a34308df927f4f1395b918f27b30eec5dce60320 100644 (file)
@@ -246,7 +246,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 
        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
-       bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+       bio_set_op_attrs(bio, REQ_OP_READ, REQ_META);
        submit_bio(bio);
        wait_on_page_locked(page);
        bio_put(bio);
index 11854dd84572639e86fc70290be9ff2ee4bd725a..67aedf4c2e7c5b6a82f1caf4ef861549b31b5581 100644 (file)
@@ -221,7 +221,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
        error2 = hfsplus_submit_bio(sb,
                                   sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
                                   sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
-                                  WRITE_SYNC);
+                                  REQ_SYNC);
        if (!error)
                error = error2;
        if (!write_backup)
@@ -230,7 +230,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
        error2 = hfsplus_submit_bio(sb,
                                  sbi->part_start + sbi->sect_count - 2,
                                  sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
-                                 WRITE_SYNC);
+                                 REQ_SYNC);
        if (!error)
                error2 = error;
 out:
index 684996c8a3a4a2df646dc44c4ee4e90904470989..4055f51617eff973345a887e7df41259090f49ec 100644 (file)
@@ -186,7 +186,7 @@ __flush_batch(journal_t *journal, int *batch_count)
 
        blk_start_plug(&plug);
        for (i = 0; i < *batch_count; i++)
-               write_dirty_buffer(journal->j_chkpt_bhs[i], WRITE_SYNC);
+               write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
        blk_finish_plug(&plug);
 
        for (i = 0; i < *batch_count; i++) {
index 31f8ca0466392eef588a2e61fbc5b34b132c5345..8c514367ba5acb460d293fd6007e6479b7ceae0f 100644 (file)
@@ -155,9 +155,10 @@ static int journal_submit_commit_record(journal_t *journal,
 
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_has_feature_async_commit(journal))
-               ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+               ret = submit_bh(REQ_OP_WRITE,
+                       REQ_SYNC | REQ_PREFLUSH | REQ_FUA, bh);
        else
-               ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+               ret = submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
 
        *cbh = bh;
        return ret;
@@ -402,7 +403,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                jbd2_journal_update_sb_log_tail(journal,
                                                journal->j_tail_sequence,
                                                journal->j_tail,
-                                               WRITE_SYNC);
+                                               REQ_SYNC);
                mutex_unlock(&journal->j_checkpoint_mutex);
        } else {
                jbd_debug(3, "superblock not updated\n");
@@ -717,7 +718,7 @@ start_journal_io:
                                clear_buffer_dirty(bh);
                                set_buffer_uptodate(bh);
                                bh->b_end_io = journal_end_buffer_io_sync;
-                               submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
+                               submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
                        }
                        cond_resched();
                        stats.run.rs_blocks_logged += bufs;
index 927da4956a89e5306cf918efbaceea108a7dfd27..8ed971eeab44d60cba663859db69ab5a777ad13e 100644 (file)
@@ -913,7 +913,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
         * space and if we lose sb update during power failure we'd replay
         * old transaction with possibly newly overwritten data.
         */
-       ret = jbd2_journal_update_sb_log_tail(journal, tid, block, WRITE_FUA);
+       ret = jbd2_journal_update_sb_log_tail(journal, tid, block, REQ_FUA);
        if (ret)
                goto out;
 
@@ -1306,7 +1306,7 @@ static int journal_reset(journal_t *journal)
                /* Lock here to make assertions happy... */
                mutex_lock(&journal->j_checkpoint_mutex);
                /*
-                * Update log tail information. We use WRITE_FUA since new
+                * Update log tail information. We use REQ_FUA since new
                 * transaction will start reusing journal space and so we
                 * must make sure information about current log tail is on
                 * disk before that.
@@ -1314,7 +1314,7 @@ static int journal_reset(journal_t *journal)
                jbd2_journal_update_sb_log_tail(journal,
                                                journal->j_tail_sequence,
                                                journal->j_tail,
-                                               WRITE_FUA);
+                                               REQ_FUA);
                mutex_unlock(&journal->j_checkpoint_mutex);
        }
        return jbd2_journal_start_thread(journal);
@@ -1454,7 +1454,7 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
        sb->s_errno    = cpu_to_be32(journal->j_errno);
        read_unlock(&journal->j_state_lock);
 
-       jbd2_write_superblock(journal, WRITE_FUA);
+       jbd2_write_superblock(journal, REQ_FUA);
 }
 EXPORT_SYMBOL(jbd2_journal_update_sb_errno);
 
@@ -1720,7 +1720,8 @@ int jbd2_journal_destroy(journal_t *journal)
                                ++journal->j_transaction_sequence;
                        write_unlock(&journal->j_state_lock);
 
-                       jbd2_mark_journal_empty(journal, WRITE_FLUSH_FUA);
+                       jbd2_mark_journal_empty(journal,
+                                       REQ_PREFLUSH | REQ_FUA);
                        mutex_unlock(&journal->j_checkpoint_mutex);
                } else
                        err = -EIO;
@@ -1979,7 +1980,7 @@ int jbd2_journal_flush(journal_t *journal)
         * the magic code for a fully-recovered superblock.  Any future
         * commits of data to the journal will restore the current
         * s_start value. */
-       jbd2_mark_journal_empty(journal, WRITE_FUA);
+       jbd2_mark_journal_empty(journal, REQ_FUA);
        mutex_unlock(&journal->j_checkpoint_mutex);
        write_lock(&journal->j_state_lock);
        J_ASSERT(!journal->j_running_transaction);
@@ -2025,7 +2026,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
        if (write) {
                /* Lock to make assertions happy... */
                mutex_lock(&journal->j_checkpoint_mutex);
-               jbd2_mark_journal_empty(journal, WRITE_FUA);
+               jbd2_mark_journal_empty(journal, REQ_FUA);
                mutex_unlock(&journal->j_checkpoint_mutex);
        }
 
index 91171dc352cbd10d7a0bc8120f6e8bffcf19d139..cfc38b5521189f8ff64330ff33aa6ac8c25794ec 100644 (file)
@@ -648,7 +648,7 @@ static void flush_descriptor(journal_t *journal,
        set_buffer_jwrite(descriptor);
        BUFFER_TRACE(descriptor, "write");
        set_buffer_dirty(descriptor);
-       write_dirty_buffer(descriptor, WRITE_SYNC);
+       write_dirty_buffer(descriptor, REQ_SYNC);
 }
 #endif
 
index a21ea8b3e5fa6762c80247137f332e3d3a651be1..bb1da1feafeb8202d904b4215f13dd1868dd2b4e 100644 (file)
@@ -2002,7 +2002,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
-       bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+       bio->bi_opf = REQ_OP_READ;
        /*check if journaling to disk has been disabled*/
        if (log->no_integrity) {
                bio->bi_iter.bi_size = 0;
@@ -2146,7 +2146,7 @@ static void lbmStartIO(struct lbuf * bp)
 
        bio->bi_end_io = lbmIODone;
        bio->bi_private = bp;
-       bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
+       bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
 
        /* check if journaling to disk has been disabled */
        if (log->no_integrity) {
index d2413af0823a24202560eb73ee363dcb980c939c..f35e2819d0c699487c692e94e74ac794edd3c206 100644 (file)
@@ -489,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
        struct buffer_head map_bh;
        loff_t i_size = i_size_read(inode);
        int ret = 0;
-       int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : 0);
+       int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? REQ_SYNC : 0);
 
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
@@ -705,7 +705,7 @@ mpage_writepages(struct address_space *mapping,
                ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
                if (mpd.bio) {
                        int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-                                 WRITE_SYNC : 0);
+                                 REQ_SYNC : 0);
                        mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
                }
        }
@@ -726,7 +726,7 @@ int mpage_writepage(struct page *page, get_block_t get_block,
        int ret = __mpage_writepage(page, wbc, &mpd);
        if (mpd.bio) {
                int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
-                         WRITE_SYNC : 0);
+                         REQ_SYNC : 0);
                mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
        }
        return ret;
index c95d369e90aa94ad89bd61ce4e833ae3b9f2365c..12eeae62a2b1f7042dcc3054a22920d9b151a036 100644 (file)
@@ -189,7 +189,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
        set_buffer_dirty(nilfs->ns_sbh[0]);
        if (nilfs_test_opt(nilfs, BARRIER)) {
                err = __sync_dirty_buffer(nilfs->ns_sbh[0],
-                                         WRITE_SYNC | WRITE_FLUSH_FUA);
+                                         REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
        } else {
                err = sync_dirty_buffer(nilfs->ns_sbh[0]);
        }
index 636abcbd46501b9c00ef5e3155d6656a161d7d49..52eef16edb01fcdbcd538a9d2f02d030a09eda5d 100644 (file)
@@ -627,7 +627,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
        slot = o2nm_this_node();
 
        bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
-                                WRITE_SYNC);
+                                REQ_SYNC);
        if (IS_ERR(bio)) {
                status = PTR_ERR(bio);
                mlog_errno(status);
index bc2dde2423c2eb3fdb464031d18c22e4586e81cb..aa40c242f1db3d9901c02eee0fd3e13a351a3c1b 100644 (file)
@@ -1111,7 +1111,8 @@ static int flush_commit_list(struct super_block *s,
                mark_buffer_dirty(jl->j_commit_bh) ;
                depth = reiserfs_write_unlock_nested(s);
                if (reiserfs_barrier_flush(s))
-                       __sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
+                       __sync_dirty_buffer(jl->j_commit_bh,
+                                       REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(jl->j_commit_bh);
                reiserfs_write_lock_nested(s, depth);
@@ -1269,7 +1270,8 @@ static int _update_journal_header_block(struct super_block *sb,
                depth = reiserfs_write_unlock_nested(sb);
 
                if (reiserfs_barrier_flush(sb))
-                       __sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
+                       __sync_dirty_buffer(journal->j_header_bh,
+                                       REQ_PREFLUSH | REQ_FUA);
                else
                        sync_dirty_buffer(journal->j_header_bh);
 
index 3e57a56cf8294770475e5edd04d03ee5565516fa..594e02c485b23d0359254ca31b03ffcebb4d90fb 100644 (file)
@@ -495,8 +495,10 @@ xfs_submit_ioend(
 
        ioend->io_bio->bi_private = ioend;
        ioend->io_bio->bi_end_io = xfs_end_bio;
-       bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-                        (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+       ioend->io_bio->bi_opf = REQ_OP_WRITE;
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               ioend->io_bio->bi_opf |= REQ_SYNC;
+
        /*
         * If we are failing the IO now, just mark the ioend with an
         * error and finish it. This will run IO completion immediately
@@ -567,8 +569,9 @@ xfs_chain_bio(
 
        bio_chain(ioend->io_bio, new);
        bio_get(ioend->io_bio);         /* for xfs_destroy_ioend */
-       bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
-                         (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+       ioend->io_bio->bi_opf = REQ_OP_WRITE;
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               ioend->io_bio->bi_opf |= REQ_SYNC;
        submit_bio(ioend->io_bio);
        ioend->io_bio = new;
 }
index b5b9bffe352074806910a064ae41a941c97678c8..33c435f3316c6fe895bdd0a7e3794e8fb5268932 100644 (file)
@@ -1304,7 +1304,7 @@ _xfs_buf_ioapply(
        if (bp->b_flags & XBF_WRITE) {
                op = REQ_OP_WRITE;
                if (bp->b_flags & XBF_SYNCIO)
-                       op_flags = WRITE_SYNC;
+                       op_flags = REQ_SYNC;
                if (bp->b_flags & XBF_FUA)
                        op_flags |= REQ_FUA;
                if (bp->b_flags & XBF_FLUSH)
index 46a74209917fa58d9c282a1f88778d3b1054fc1f..7a1b78ab7c1527cda6bb419aa02f4f8c58551e75 100644 (file)
@@ -151,58 +151,11 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
  */
 #define CHECK_IOVEC_ONLY -1
 
-/*
- * The below are the various read and write flags that we support. Some of
- * them include behavioral modifiers that send information down to the
- * block layer and IO scheduler. They should be used along with a req_op.
- * Terminology:
- *
- *     The block layer uses device plugging to defer IO a little bit, in
- *     the hope that we will see more IO very shortly. This increases
- *     coalescing of adjacent IO and thus reduces the number of IOs we
- *     have to send to the device. It also allows for better queuing,
- *     if the IO isn't mergeable. If the caller is going to be waiting
- *     for the IO, then he must ensure that the device is unplugged so
- *     that the IO is dispatched to the driver.
- *
- *     All IO is handled async in Linux. This is fine for background
- *     writes, but for reads or writes that someone waits for completion
- *     on, we want to notify the block layer and IO scheduler so that they
- *     know about it. That allows them to make better scheduling
- *     decisions. So when the below references 'sync' and 'async', it
- *     is referencing this priority hint.
- *
- * With that in mind, the available types are:
- *
- * READ                        A normal read operation. Device will be plugged.
- * READ_SYNC           A synchronous read. Device is not plugged, caller can
- *                     immediately wait on this read without caring about
- *                     unplugging.
- * WRITE               A normal async write. Device will be plugged.
- * WRITE_SYNC          Synchronous write. Identical to WRITE, but passes down
- *                     the hint that someone will be waiting on this IO
- *                     shortly. The write equivalent of READ_SYNC.
- * WRITE_ODIRECT       Special case write for O_DIRECT only.
- * WRITE_FLUSH         Like WRITE_SYNC but with preceding cache flush.
- * WRITE_FUA           Like WRITE_SYNC but data is guaranteed to be on
- *                     non-volatile media on completion.
- * WRITE_FLUSH_FUA     Combination of WRITE_FLUSH and FUA. The IO is preceded
- *                     by a cache flush and data is guaranteed to be on
- *                     non-volatile media on completion.
- *
- */
 #define RW_MASK                        REQ_OP_WRITE
 
 #define READ                   REQ_OP_READ
 #define WRITE                  REQ_OP_WRITE
 
-#define READ_SYNC              0
-#define WRITE_SYNC             REQ_SYNC
-#define WRITE_ODIRECT          (REQ_SYNC | REQ_IDLE)
-#define WRITE_FLUSH            REQ_PREFLUSH
-#define WRITE_FUA              REQ_FUA
-#define WRITE_FLUSH_FUA                (REQ_PREFLUSH | REQ_FUA)
-
 /*
  * Attribute flags.  These should be or-ed together to figure out what
  * has been changed!
index a9d34424450dd93072b8952519f7e62f831456e6..5da2c829a71899571e4a2521dd8b7551474d3eb7 100644 (file)
@@ -55,7 +55,7 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
                { IPU,          "IN-PLACE" },                           \
                { OPU,          "OUT-OF-PLACE" })
 
-#define F2FS_BIO_FLAG_MASK(t)  (t & (REQ_RAHEAD | WRITE_FLUSH_FUA))
+#define F2FS_BIO_FLAG_MASK(t)  (t & (REQ_RAHEAD | REQ_PREFLUSH | REQ_FUA))
 #define F2FS_BIO_EXTRA_MASK(t) (t & (REQ_META | REQ_PRIO))
 
 #define show_bio_type(op_flags)        show_bio_op_flags(op_flags),            \
@@ -65,11 +65,9 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
        __print_symbolic(F2FS_BIO_FLAG_MASK(flags),                     \
                { 0,                    "WRITE" },                      \
                { REQ_RAHEAD,           "READAHEAD" },                  \
-               { READ_SYNC,            "READ_SYNC" },                  \
-               { WRITE_SYNC,           "WRITE_SYNC" },                 \
-               { WRITE_FLUSH,          "WRITE_FLUSH" },                \
-               { WRITE_FUA,            "WRITE_FUA" },                  \
-               { WRITE_FLUSH_FUA,      "WRITE_FLUSH_FUA" })
+               { REQ_SYNC,             "REQ_SYNC" },                   \
+               { REQ_PREFLUSH,         "REQ_PREFLUSH" },               \
+               { REQ_FUA,              "REQ_FUA" })
 
 #define show_bio_extra(type)                                           \
        __print_symbolic(F2FS_BIO_EXTRA_MASK(type),                     \
index a3b1e617bcdc39e8d1d3726c25b4d01876a0b57c..32e0c232efbafa4a3c8f6f40468c61eaad937d5b 100644 (file)
@@ -307,7 +307,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
 {
        int error;
 
-       hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+       hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
                      swsusp_header, NULL);
        if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
            !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
@@ -317,7 +317,7 @@ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
                swsusp_header->flags = flags;
                if (flags & SF_CRC32_MODE)
                        swsusp_header->crc32 = handle->crc32;
-               error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+               error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
                                      swsusp_resume_block, swsusp_header, NULL);
        } else {
                printk(KERN_ERR "PM: Swap header not found!\n");
@@ -397,7 +397,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
        } else {
                src = buf;
        }
-       return hib_submit_io(REQ_OP_WRITE, WRITE_SYNC, offset, src, hb);
+       return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
 }
 
 static void release_swap_writer(struct swap_map_handle *handle)
@@ -1000,8 +1000,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
                        return -ENOMEM;
                }
 
-               error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset,
-                                     tmp->map, NULL);
+               error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL);
                if (error) {
                        release_swap_reader(handle);
                        return error;
@@ -1025,7 +1024,7 @@ static int swap_read_page(struct swap_map_handle *handle, void *buf,
        offset = handle->cur->entries[handle->k];
        if (!offset)
                return -EFAULT;
-       error = hib_submit_io(REQ_OP_READ, READ_SYNC, offset, buf, hb);
+       error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb);
        if (error)
                return error;
        if (++handle->k >= MAP_PAGE_ENTRIES) {
@@ -1534,7 +1533,7 @@ int swsusp_check(void)
        if (!IS_ERR(hib_resume_bdev)) {
                set_blocksize(hib_resume_bdev, PAGE_SIZE);
                clear_page(swsusp_header);
-               error = hib_submit_io(REQ_OP_READ, READ_SYNC,
+               error = hib_submit_io(REQ_OP_READ, 0,
                                        swsusp_resume_block,
                                        swsusp_header, NULL);
                if (error)
@@ -1543,7 +1542,7 @@ int swsusp_check(void)
                if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
                        memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
                        /* Reset swap signature now */
-                       error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+                       error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
                                                swsusp_resume_block,
                                                swsusp_header, NULL);
                } else {
@@ -1588,11 +1587,11 @@ int swsusp_unmark(void)
 {
        int error;
 
-       hib_submit_io(REQ_OP_READ, READ_SYNC, swsusp_resume_block,
+       hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block,
                      swsusp_header, NULL);
        if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
                memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
-               error = hib_submit_io(REQ_OP_WRITE, WRITE_SYNC,
+               error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC,
                                        swsusp_resume_block,
                                        swsusp_header, NULL);
        } else {