block: unify flags for struct bio and struct request
author Christoph Hellwig <hch@lst.de>
Sat, 7 Aug 2010 16:20:39 +0000 (18:20 +0200)
committer Jens Axboe <jaxboe@fusionio.com>
Sat, 7 Aug 2010 16:20:39 +0000 (18:20 +0200)
Remove the current bio flags and reuse the request flags for the bio, too.
This makes it easier to trace the type of I/O from the filesystem
down to the block driver.  There were two flags in the bio that were
missing in the requests: BIO_RW_UNPLUG and BIO_RW_AHEAD.  Also I've
renamed two request flags that had a superfluous RW in them.
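
For reference, the old bio and request flag names map onto the unified
REQ_* names as follows:

  BIO_RW           -> REQ_WRITE
  BIO_RW_AHEAD     -> REQ_RAHEAD
  BIO_RW_BARRIER   -> REQ_HARDBARRIER
  BIO_RW_SYNCIO    -> REQ_SYNC
  BIO_RW_UNPLUG    -> REQ_UNPLUG
  BIO_RW_META      -> REQ_META
  BIO_RW_DISCARD   -> REQ_DISCARD
  BIO_RW_NOIDLE    -> REQ_NOIDLE
  REQ_RW           -> REQ_WRITE
  REQ_RW_SYNC      -> REQ_SYNC
  REQ_RW_META      -> REQ_META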

Note that the flags are defined in bio.h despite having the REQ_ name: since
blkdev.h includes bio.h, that is the only way to go for now.
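
As an illustrative sketch (not part of the patch itself), the shared flag
namespace means request setup can copy the filesystem-visible flags from the
bio in a single masked assignment instead of testing each BIO_RW_* bit, along
the lines of what init_request_from_bio() in blk-core.c now does:

  /*
   * Sketch: the same REQ_* bits are valid in bio->bi_rw and in
   * rq->cmd_flags, so the shared flags copy over directly.
   */
  static void example_copy_bio_flags(struct request *req, struct bio *bio)
  {
          /* propagate all flags common to bios and requests */
          req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;

          /* readahead may fail at any time, so mark the request failfast */
          if (bio->bi_rw & REQ_RAHEAD)
                  req->cmd_flags |= REQ_FAILFAST_MASK;
  }

Callers that used to test bio_rw_flagged(bio, BIO_RW_SYNCIO) now check
bio->bi_rw & REQ_SYNC, and the same expression works unchanged on
rq->cmd_flags.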

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
47 files changed:
block/blk-barrier.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/cfq-iosched.c
block/elevator.c
drivers/ata/libata-scsi.c
drivers/block/aoe/aoeblk.c
drivers/block/brd.c
drivers/block/drbd/drbd_actlog.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/loop.c
drivers/block/pktcdvd.c
drivers/block/umem.c
drivers/ide/ide-cd_ioctl.c
drivers/ide/ide-floppy.c
drivers/md/dm-io.c
drivers/md/dm-kcopyd.c
drivers/md/dm-raid1.c
drivers/md/dm-stripe.c
drivers/md/dm.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/scsi/osd/osd_initiator.c
fs/bio.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/volumes.c
fs/exofs/ios.c
fs/gfs2/log.c
fs/gfs2/meta_io.c
fs/gfs2/ops_fstype.c
fs/nilfs2/segbuf.c
include/linux/bio.h
include/linux/blkdev.h
include/linux/fs.h
kernel/power/block_io.c
kernel/trace/blktrace.c
mm/page_io.c

index 74e404393172ce299d2ae0cc070b4232878d5cb2..7c6f4a714687e177a14123887fce87f3a337bbec 100644 (file)
@@ -203,7 +203,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
                /* initialize proxy request and queue it */
                blk_rq_init(q, rq);
                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-                       rq->cmd_flags |= REQ_RW;
+                       rq->cmd_flags |= REQ_WRITE;
                if (q->ordered & QUEUE_ORDERED_DO_FUA)
                        rq->cmd_flags |= REQ_FUA;
                init_request_from_bio(rq, q->orig_bar_rq->bio);
index dca43a31e7255ee67d913225abe053c2265ad2f4..66c3cfe94d0ad3efc68b4379290435f257c414a4 100644 (file)
@@ -1140,25 +1140,9 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        req->cpu = bio->bi_comp_cpu;
        req->cmd_type = REQ_TYPE_FS;
 
-       /*
-        * Inherit FAILFAST from bio (for read-ahead, and explicit
-        * FAILFAST).  FAILFAST flags are identical for req and bio.
-        */
-       if (bio_rw_flagged(bio, BIO_RW_AHEAD))
+       req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
+       if (bio->bi_rw & REQ_RAHEAD)
                req->cmd_flags |= REQ_FAILFAST_MASK;
-       else
-               req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK;
-
-       if (bio_rw_flagged(bio, BIO_RW_DISCARD))
-               req->cmd_flags |= REQ_DISCARD;
-       if (bio_rw_flagged(bio, BIO_RW_BARRIER))
-               req->cmd_flags |= REQ_HARDBARRIER;
-       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
-               req->cmd_flags |= REQ_RW_SYNC;
-       if (bio_rw_flagged(bio, BIO_RW_META))
-               req->cmd_flags |= REQ_RW_META;
-       if (bio_rw_flagged(bio, BIO_RW_NOIDLE))
-               req->cmd_flags |= REQ_NOIDLE;
 
        req->errors = 0;
        req->__sector = bio->bi_sector;
@@ -1181,12 +1165,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
        int el_ret;
        unsigned int bytes = bio->bi_size;
        const unsigned short prio = bio_prio(bio);
-       const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
-       const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG);
+       const bool sync = (bio->bi_rw & REQ_SYNC);
+       const bool unplug = (bio->bi_rw & REQ_UNPLUG);
        const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK;
        int rw_flags;
 
-       if (bio_rw_flagged(bio, BIO_RW_BARRIER) &&
+       if ((bio->bi_rw & REQ_HARDBARRIER) &&
            (q->next_ordered == QUEUE_ORDERED_NONE)) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
@@ -1200,7 +1184,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 
        spin_lock_irq(q->queue_lock);
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q))
+       if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q))
                goto get_rq;
 
        el_ret = elv_merge(q, &req, bio);
@@ -1275,7 +1259,7 @@ get_rq:
         */
        rw_flags = bio_data_dir(bio);
        if (sync)
-               rw_flags |= REQ_RW_SYNC;
+               rw_flags |= REQ_SYNC;
 
        /*
         * Grab a free request. This is might sleep but can not fail.
@@ -1464,7 +1448,7 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) &&
+               if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
                             nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
                               bdevname(bio->bi_bdev, b),
@@ -1497,8 +1481,7 @@ static inline void __generic_make_request(struct bio *bio)
                if (bio_check_eod(bio, nr_sectors))
                        goto end_io;
 
-               if (bio_rw_flagged(bio, BIO_RW_DISCARD) &&
-                   !blk_queue_discard(q)) {
+               if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
                        err = -EOPNOTSUPP;
                        goto end_io;
                }
@@ -2365,7 +2348,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                     struct bio *bio)
 {
        /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
-       rq->cmd_flags |= bio->bi_rw & REQ_RW;
+       rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
 
        if (bio_has_data(bio)) {
                rq->nr_phys_segments = bio_phys_segments(q, bio);
index 9083cf0180cc8a296d2529a4ef9d9943ea5d05c1..c65d7593f7f1deba5511347ceeb6dd5c881e5b3c 100644 (file)
@@ -307,7 +307,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                return PTR_ERR(bio);
 
        if (rq_data_dir(rq) == WRITE)
-               bio->bi_rw |= (1 << BIO_RW);
+               bio->bi_rw |= (1 << REQ_WRITE);
 
        if (do_copy)
                rq->cmd_flags |= REQ_COPY_USER;
index 87e4fb7d0e9814053b775130125437b90de81fc8..4852475521ea911689be649166b8f592684cb2c6 100644 (file)
@@ -180,7 +180,7 @@ new_segment:
        }
 
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-               if (rq->cmd_flags & REQ_RW)
+               if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
                sg->page_link &= ~0x02;
index d4edeb8fceb8b33926e679e54459dc4b442386ae..eb4086f7dfef9eb7efc6d202fb7a979919a551b8 100644 (file)
@@ -458,7 +458,7 @@ static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
  */
 static inline bool cfq_bio_sync(struct bio *bio)
 {
-       return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
 }
 
 /*
@@ -646,10 +646,10 @@ cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2,
                return rq1;
        else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
                return rq2;
-       if ((rq1->cmd_flags & REQ_RW_META) && !(rq2->cmd_flags & REQ_RW_META))
+       if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
                return rq1;
-       else if ((rq2->cmd_flags & REQ_RW_META) &&
-                !(rq1->cmd_flags & REQ_RW_META))
+       else if ((rq2->cmd_flags & REQ_META) &&
+                !(rq1->cmd_flags & REQ_META))
                return rq2;
 
        s1 = blk_rq_pos(rq1);
@@ -1485,7 +1485,7 @@ static void cfq_remove_request(struct request *rq)
        cfqq->cfqd->rq_queued--;
        cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(rq), rq_is_sync(rq));
-       if (rq->cmd_flags & REQ_RW_META) {
+       if (rq->cmd_flags & REQ_META) {
                WARN_ON(!cfqq->meta_pending);
                cfqq->meta_pending--;
        }
@@ -3177,7 +3177,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * So both queues are sync. Let the new request get disk time if
         * it's a metadata request and the current queue is doing regular IO.
         */
-       if ((rq->cmd_flags & REQ_RW_META) && !cfqq->meta_pending)
+       if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
                return true;
 
        /*
@@ -3231,7 +3231,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        struct cfq_io_context *cic = RQ_CIC(rq);
 
        cfqd->rq_queued++;
-       if (rq->cmd_flags & REQ_RW_META)
+       if (rq->cmd_flags & REQ_META)
                cfqq->meta_pending++;
 
        cfq_update_io_thinktime(cfqd, cic);
index aa99b59c03d6748c17e948687a9e38a08c4224a4..816a7c8d6394257d89fb36bd67c6b01a62d92020 100644 (file)
@@ -79,8 +79,7 @@ int elv_rq_merge_ok(struct request *rq, struct bio *bio)
        /*
         * Don't merge file system requests and discard requests
         */
-       if (bio_rw_flagged(bio, BIO_RW_DISCARD) !=
-           bio_rw_flagged(rq->bio, BIO_RW_DISCARD))
+       if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
                return 0;
 
        /*
index a5c08b082edbed869ccc895d9f4da20b01b31d32..0a8cd34847915295d04867d1fd576517c91e8646 100644 (file)
@@ -1114,7 +1114,7 @@ static int atapi_drain_needed(struct request *rq)
        if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC))
                return 0;
 
-       if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW))
+       if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE))
                return 0;
 
        return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
index 035cefe4045ae76f076f3d56a85b058c3bb68fc7..65deffde60acadb7ae3329a0485ba1552b75c941 100644 (file)
@@ -173,7 +173,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
                BUG();
                bio_endio(bio, -ENXIO);
                return 0;
-       } else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+       } else if (bio->bi_rw & REQ_HARDBARRIER) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        } else if (bio->bi_io_vec == NULL) {
index f1bf79d9bc0a1c65df988ef4ec0b3c1eab907d93..1b218c6b68205c961948b3fe14f315d26f711aa4 100644 (file)
@@ -340,7 +340,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
                                                get_capacity(bdev->bd_disk))
                goto out;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+       if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                err = 0;
                discard_from_brd(brd, sector, bio->bi_size);
                goto out;
index df018990c42252f59f4e57c40abddabb3118368f..9400845d602e0809c818d067bc417e8f10220684 100644 (file)
@@ -79,8 +79,8 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        md_io.error = 0;
 
        if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-               rw |= (1 << BIO_RW_BARRIER);
-       rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO));
+               rw |= REQ_HARDBARRIER;
+       rw |= REQ_UNPLUG | REQ_SYNC;
 
  retry:
        bio = bio_alloc(GFP_NOIO, 1);
@@ -103,11 +103,11 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
        /* check for unsupported barrier op.
         * would rather check on EOPNOTSUPP, but that is not reliable.
         * don't try again for ANY return value != 0 */
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) {
+       if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
                /* Try again with no barrier */
                dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
                set_bit(MD_NO_BARRIER, &mdev->flags);
-               rw &= ~(1 << BIO_RW_BARRIER);
+               rw &= ~REQ_HARDBARRIER;
                bio_put(bio);
                goto retry;
        }
index 7258c95e895e3c3ff7c91bdc3660154d55061413..e2ab13d99d6951b6e8c6a865ae75c56678bf3072 100644 (file)
@@ -2425,15 +2425,15 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
        /* NOTE: no need to check if barriers supported here as we would
         *       not pass the test in make_request_common in that case
         */
-       if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
+       if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
                dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
                /* dp_flags |= DP_HARDBARRIER; */
        }
-       if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
+       if (req->master_bio->bi_rw & REQ_SYNC)
                dp_flags |= DP_RW_SYNC;
        /* for now handle SYNCIO and UNPLUG
         * as if they still were one and the same flag */
-       if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
+       if (req->master_bio->bi_rw & REQ_UNPLUG)
                dp_flags |= DP_RW_SYNC;
        if (mdev->state.conn >= C_SYNC_SOURCE &&
            mdev->state.conn <= C_PAUSED_SYNC_T)
index dff48701b84d8784f0f85a9202abbaaf16c522fc..cba1deb7b271161048bab456400e8555f310b030 100644 (file)
@@ -1180,7 +1180,7 @@ next_bio:
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        /* we special case some flags in the multi-bio case, see below
-        * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+        * (REQ_UNPLUG, REQ_HARDBARRIER) */
        bio->bi_rw = rw;
        bio->bi_private = e;
        bio->bi_end_io = drbd_endio_sec;
@@ -1209,16 +1209,16 @@ next_bio:
                bios = bios->bi_next;
                bio->bi_next = NULL;
 
-               /* strip off BIO_RW_UNPLUG unless it is the last bio */
+               /* strip off REQ_UNPLUG unless it is the last bio */
                if (bios)
-                       bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+                       bio->bi_rw &= ~REQ_UNPLUG;
 
                drbd_generic_make_request(mdev, fault_type, bio);
 
-               /* strip off BIO_RW_BARRIER,
+               /* strip off REQ_HARDBARRIER,
                 * unless it is the first or last bio */
                if (bios && bios->bi_next)
-                       bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+                       bios->bi_rw &= ~REQ_HARDBARRIER;
        } while (bios);
        maybe_kick_lo(mdev);
        return 0;
@@ -1233,7 +1233,7 @@ fail:
 }
 
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:      DRBD device.
  * @w:         work object.
  * @cancel:    The connection will be closed anyways (unused in this callback)
@@ -1245,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
           (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
           so that we can finish that epoch in drbd_may_finish_epoch().
           That is necessary if we already have a long chain of Epochs, before
-          we realize that BIO_RW_BARRIER is actually not supported */
+          we realize that REQ_HARDBARRIER is actually not supported */
 
        /* As long as the -ENOTSUPP on the barrier is reported immediately
           that will never trigger. If it is reported late, we will just
@@ -1824,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
                epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
                if (epoch == e->epoch) {
                        set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                       rw |= (1<<BIO_RW_BARRIER);
+                       rw |= REQ_HARDBARRIER;
                        e->flags |= EE_IS_BARRIER;
                } else {
                        if (atomic_read(&epoch->epoch_size) > 1 ||
                            !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
                                set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
                                set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                               rw |= (1<<BIO_RW_BARRIER);
+                               rw |= REQ_HARDBARRIER;
                                e->flags |= EE_IS_BARRIER;
                        }
                }
@@ -1841,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
        dp_flags = be32_to_cpu(p->dp_flags);
        if (dp_flags & DP_HARDBARRIER) {
                dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-               /* rw |= (1<<BIO_RW_BARRIER); */
+               /* rw |= REQ_HARDBARRIER; */
        }
        if (dp_flags & DP_RW_SYNC)
-               rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
        if (dp_flags & DP_MAY_SET_IN_SYNC)
                e->flags |= EE_MAY_SET_IN_SYNC;
 
index 654f1ef5cbb0fb6e21c25430e8e2cace1a84630f..f761d98a4e90320998bd54099ea8c4caa56154ac 100644 (file)
@@ -997,7 +997,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
         * because of those XXX, this is not yet enabled,
         * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
         */
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
                /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
index 6120922f459f3708685cffa46a51e5517c89b18c..fedfdb7d3cdfae59bcfda37d09e08ea91bae1ecf 100644 (file)
@@ -476,7 +476,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
        pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
 
        if (bio_rw(bio) == WRITE) {
-               bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER);
+               bool barrier = (bio->bi_rw & REQ_HARDBARRIER);
                struct file *file = lo->lo_backing_file;
 
                if (barrier) {
index 8a549db2aa7859b588e106c981301dbd5118aec6..9f3e4454274b62748ecaf4663c79ac7608edac15 100644 (file)
@@ -1221,7 +1221,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
        pkt->bio->bi_flags = 1 << BIO_UPTODATE;
        pkt->bio->bi_idx = 0;
 
-       BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+       BUG_ON(pkt->bio->bi_rw != REQ_WRITE);
        BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
        BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
        BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
index 2f9470ff8f7cec7c7d3de70ea0c347a0e92bf4e1..8be57151f5d6570cd9b9c9a629618bc23ae3466b 100644 (file)
@@ -478,7 +478,7 @@ static void process_page(unsigned long data)
                                le32_to_cpu(desc->local_addr)>>9,
                                le32_to_cpu(desc->transfer_size));
                        dump_dmastat(card, control);
-               } else if (test_bit(BIO_RW, &bio->bi_rw) &&
+               } else if ((bio->bi_rw & REQ_WRITE) &&
                           le32_to_cpu(desc->local_addr) >> 9 ==
                                card->init_size) {
                        card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
index 02712bf045c1bdece00d845d35662fd065e12c41..766b3deeb23c75f51eb5618a90e891d2fb220d90 100644 (file)
@@ -454,7 +454,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi,
           touch it at all. */
 
        if (cgc->data_direction == CGC_DATA_WRITE)
-               flags |= REQ_RW;
+               flags |= REQ_WRITE;
 
        if (cgc->sense)
                memset(cgc->sense, 0, sizeof(struct request_sense));
index c7d0737bb18a9a9435137052c5c43544ffab5436..5406b6ea3ad1d2118996a405da5b33ac1359e4ad 100644 (file)
@@ -207,7 +207,7 @@ static void idefloppy_create_rw_cmd(ide_drive_t *drive,
        memcpy(rq->cmd, pc->c, 12);
 
        pc->rq = rq;
-       if (rq->cmd_flags & REQ_RW)
+       if (rq->cmd_flags & REQ_WRITE)
                pc->flags |= PC_FLAG_WRITING;
 
        pc->flags |= PC_FLAG_DMA_OK;
index 10f457ca6af2568ab9073f8a7ab7477a0f946a94..0590c75b0ab68e7f49a9f7d0adfa82f09fa2badf 100644 (file)
@@ -356,7 +356,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
        BUG_ON(num_regions > DM_IO_MAX_REGIONS);
 
        if (sync)
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
 
        /*
         * For multiple regions we need to be careful to rewind
@@ -364,7 +364,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
-               if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+               if (where[i].count || (rw & REQ_HARDBARRIER))
                        do_region(rw, i, where + i, dp, io);
        }
 
@@ -412,8 +412,8 @@ retry:
        }
        set_current_state(TASK_RUNNING);
 
-       if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-               rw &= ~(1 << BIO_RW_BARRIER);
+       if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+               rw &= ~REQ_HARDBARRIER;
                goto retry;
        }
 
@@ -479,8 +479,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
- * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
+ * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
index addf83475040d326145d67bad45f535d40c41407..d8587bac5682f41673cc9a20695a146c30415a31 100644 (file)
@@ -345,7 +345,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
        int r;
        struct dm_io_request io_req = {
-               .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
+               .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
index ddda531723dcecd5e53d3dc644e41743dc6379a8..74136262d6542cbd599519645b6aaac3c6f1db74 100644 (file)
@@ -1211,7 +1211,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
        if (error == -EOPNOTSUPP)
                goto out;
 
-       if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+       if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
                goto out;
 
        if (unlikely(error)) {
index e610725db766f643feb7c5c7b932e2c6562e5640..d6e28d732b4d7de33e339ab32145634b6b170034 100644 (file)
@@ -284,7 +284,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
        if (!error)
                return 0; /* I/O complete */
 
-       if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
+       if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD))
                return error;
 
        if (error == -EOPNOTSUPP)
index 1e0e6dd51501036a4993e96714ca20a3ede1c2c8..d6f77baeafd605527217dd62a056f34be2bc2aed 100644 (file)
@@ -614,7 +614,7 @@ static void dec_pending(struct dm_io *io, int error)
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
-                               if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
+                               if (!(io->bio->bi_rw & REQ_HARDBARRIER))
                                        bio_list_add_head(&md->deferred,
                                                          io->bio);
                        } else
@@ -626,7 +626,7 @@ static void dec_pending(struct dm_io *io, int error)
                io_error = io->error;
                bio = io->bio;
 
-               if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
+               if (bio->bi_rw & REQ_HARDBARRIER) {
                        /*
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
@@ -1106,7 +1106,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
 
        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
-       clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
+       clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
@@ -1133,7 +1133,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
 
        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
-       clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
+       clone->bi_rw &= ~REQ_HARDBARRIER;
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
@@ -1301,7 +1301,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
 
        ci.map = dm_get_live_table(md);
        if (unlikely(!ci.map)) {
-               if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
+               if (!(bio->bi_rw & REQ_HARDBARRIER))
                        bio_io_error(bio);
                else
                        if (!md->barrier_error)
@@ -1414,7 +1414,7 @@ static int _dm_request(struct request_queue *q, struct bio *bio)
         * we have to queue this io for later.
         */
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-           unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+           unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                up_read(&md->io_lock);
 
                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
@@ -2296,7 +2296,7 @@ static void dm_wq_work(struct work_struct *work)
                if (dm_request_based(md))
                        generic_make_request(c);
                else {
-                       if (bio_rw_flagged(c, BIO_RW_BARRIER))
+                       if (c->bi_rw & REQ_HARDBARRIER)
                                process_barrier(md, c);
                        else
                                __split_and_process_bio(md, c);
index 7e0e057db9a704740220e032a1592ceb0f94f9b5..ba19060bcf3f7142868c6d401f5dbc2e780fe633 100644 (file)
@@ -294,7 +294,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio)
        dev_info_t *tmp_dev;
        sector_t start_sector;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
index cb20d0b0555adee7c12d5e643a65d142f7f054e0..1893af6787790a7d7919de4f478313fa4a2ba957 100644 (file)
@@ -353,7 +353,7 @@ static void md_submit_barrier(struct work_struct *ws)
                /* an empty barrier - all done */
                bio_endio(bio, 0);
        else {
-               bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+               bio->bi_rw &= ~REQ_HARDBARRIER;
                if (mddev->pers->make_request(mddev, bio))
                        generic_make_request(bio);
                mddev->barrier = POST_REQUEST_BARRIER;
@@ -675,11 +675,11 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
         * if zero is reached.
         * If an error occurred, call md_error
         *
-        * As we might need to resubmit the request if BIO_RW_BARRIER
+        * As we might need to resubmit the request if REQ_HARDBARRIER
         * causes ENOTSUPP, we allocate a spare bio...
         */
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
-       int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+       int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG;
 
        bio->bi_bdev = rdev->bdev;
        bio->bi_sector = sector;
@@ -691,7 +691,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
        atomic_inc(&mddev->pending_writes);
        if (!test_bit(BarriersNotsupp, &rdev->flags)) {
                struct bio *rbio;
-               rw |= (1<<BIO_RW_BARRIER);
+               rw |= REQ_HARDBARRIER;
                rbio = bio_clone(bio, GFP_NOIO);
                rbio->bi_private = bio;
                rbio->bi_end_io = super_written_barrier;
@@ -736,7 +736,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
        struct completion event;
        int ret;
 
-       rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+       rw |= REQ_SYNC | REQ_UNPLUG;
 
        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
index 10597bfec0001c6ec3d732cab348ba459ee89a1a..fc56e0f21c80ac9774d8271dff26129f70bb9c77 100644 (file)
@@ -67,7 +67,7 @@ struct mdk_rdev_s
 #define        Faulty          1               /* device is known to have a fault */
 #define        In_sync         2               /* device is in_sync with rest of array */
 #define        WriteMostly     4               /* Avoid reading if at all possible */
-#define        BarriersNotsupp 5               /* BIO_RW_BARRIER is not supported */
+#define        BarriersNotsupp 5               /* REQ_HARDBARRIER is not supported */
 #define        AllReserved     6               /* If whole device is reserved for
                                         * one array */
 #define        AutoDetected    7               /* added by auto-detect */
@@ -254,7 +254,7 @@ struct mddev_s
                                                         * fails.  Only supported
                                                         */
        struct bio                      *biolist;       /* bios that need to be retried
-                                                        * because BIO_RW_BARRIER is not supported
+                                                        * because REQ_HARDBARRIER is not supported
                                                         */
 
        atomic_t                        recovery_active; /* blocks scheduled, but not written */
index 410fb60699ac1d2b2ca4af62bca7ed89b2af60d7..0307d217e7a4c399f52550482cd945942d416d30 100644 (file)
@@ -91,7 +91,7 @@ static void multipath_end_request(struct bio *bio, int error)
 
        if (uptodate)
                multipath_end_bh_io(mp_bh, 0);
-       else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) {
+       else if (!(bio->bi_rw & REQ_RAHEAD)) {
                /*
                 * oops, IO error:
                 */
@@ -142,7 +142,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
        struct multipath_bh * mp_bh;
        struct multipath_info *multipath;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
@@ -163,7 +163,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio)
        mp_bh->bio = *bio;
        mp_bh->bio.bi_sector += multipath->rdev->data_offset;
        mp_bh->bio.bi_bdev = multipath->rdev->bdev;
-       mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+       mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
        mp_bh->bio.bi_end_io = multipath_end_request;
        mp_bh->bio.bi_private = mp_bh;
        generic_make_request(&mp_bh->bio);
@@ -398,7 +398,7 @@ static void multipathd (mddev_t *mddev)
                        *bio = *(mp_bh->master_bio);
                        bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
                        bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
-                       bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT);
+                       bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
                        bio->bi_end_io = multipath_end_request;
                        bio->bi_private = mp_bh;
                        generic_make_request(bio);
index 563abed5a2cb73b68cc86d4230b8e1ab0a510b61..6f7af46d623c9d1cb4dccc8fdccff28b0e5bfa2b 100644 (file)
@@ -483,7 +483,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio)
        struct strip_zone *zone;
        mdk_rdev_t *tmp_dev;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
index a948da8012de205f37bb5e7498df7235b374b7ca..73cc74ffc26bd5eefb7166aa37130e4c47daa56a 100644 (file)
@@ -787,7 +787,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        struct bio_list bl;
        struct page **behind_pages = NULL;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool do_sync = (bio->bi_rw & REQ_SYNC);
        bool do_barriers;
        mdk_rdev_t *blocked_rdev;
 
@@ -822,7 +822,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                finish_wait(&conf->wait_barrier, &w);
        }
        if (unlikely(!mddev->barriers_work &&
-                    bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+                    (bio->bi_rw & REQ_HARDBARRIER))) {
                if (rw == WRITE)
                        md_write_end(mddev);
                bio_endio(bio, -EOPNOTSUPP);
@@ -877,7 +877,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid1_end_read_request;
-               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+               read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r1_bio;
 
                generic_make_request(read_bio);
@@ -959,7 +959,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        atomic_set(&r1_bio->remaining, 0);
        atomic_set(&r1_bio->behind_remaining, 0);
 
-       do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER);
+       do_barriers = bio->bi_rw & REQ_HARDBARRIER;
        if (do_barriers)
                set_bit(R1BIO_Barrier, &r1_bio->state);
 
@@ -975,8 +975,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset;
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) |
-                       (do_sync << BIO_RW_SYNCIO);
+               mbio->bi_rw = WRITE | do_barriers | do_sync;
                mbio->bi_private = r1_bio;
 
                if (behind_pages) {
@@ -1633,7 +1632,7 @@ static void raid1d(mddev_t *mddev)
                        sync_request_write(mddev, r1_bio);
                        unplug = 1;
                } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
-                       /* some requests in the r1bio were BIO_RW_BARRIER
+                       /* some requests in the r1bio were REQ_HARDBARRIER
                         * requests which failed with -EOPNOTSUPP.  Hohumm..
                         * Better resubmit without the barrier.
                         * We know which devices to resubmit for, because
@@ -1641,7 +1640,7 @@ static void raid1d(mddev_t *mddev)
                         * We already have a nr_pending reference on these rdevs.
                         */
                        int i;
-                       const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+                       const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC);
                        clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
                        clear_bit(R1BIO_Barrier, &r1_bio->state);
                        for (i=0; i < conf->raid_disks; i++)
@@ -1662,8 +1661,7 @@ static void raid1d(mddev_t *mddev)
                                                conf->mirrors[i].rdev->data_offset;
                                        bio->bi_bdev = conf->mirrors[i].rdev->bdev;
                                        bio->bi_end_io = raid1_end_write_request;
-                                       bio->bi_rw = WRITE |
-                                               (do_sync << BIO_RW_SYNCIO);
+                                       bio->bi_rw = WRITE | do_sync;
                                        bio->bi_private = r1_bio;
                                        r1_bio->bios[i] = bio;
                                        generic_make_request(bio);
@@ -1698,7 +1696,7 @@ static void raid1d(mddev_t *mddev)
                                       (unsigned long long)r1_bio->sector);
                                raid_end_bio_io(r1_bio);
                        } else {
-                               const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO);
+                               const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC;
                                r1_bio->bios[r1_bio->read_disk] =
                                        mddev->ro ? IO_BLOCKED : NULL;
                                r1_bio->read_disk = disk;
@@ -1715,7 +1713,7 @@ static void raid1d(mddev_t *mddev)
                                bio->bi_sector = r1_bio->sector + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
                                bio->bi_end_io = raid1_end_read_request;
-                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+                               bio->bi_rw = READ | do_sync;
                                bio->bi_private = r1_bio;
                                unplug = 1;
                                generic_make_request(bio);
index 42e64e4e5e2503fb4b81c12ac6706932f4397ba4..62ecb6650fd023ce10c5dc619be058fa480f8d91 100644 (file)
@@ -799,12 +799,12 @@ static int make_request(mddev_t *mddev, struct bio * bio)
        int i;
        int chunk_sects = conf->chunk_mask + 1;
        const int rw = bio_data_dir(bio);
-       const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO);
+       const bool do_sync = (bio->bi_rw & REQ_SYNC);
        struct bio_list bl;
        unsigned long flags;
        mdk_rdev_t *blocked_rdev;
 
-       if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
+       if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
                md_barrier_request(mddev, bio);
                return 0;
        }
@@ -879,7 +879,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        mirror->rdev->data_offset;
                read_bio->bi_bdev = mirror->rdev->bdev;
                read_bio->bi_end_io = raid10_end_read_request;
-               read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+               read_bio->bi_rw = READ | do_sync;
                read_bio->bi_private = r10_bio;
 
                generic_make_request(read_bio);
@@ -947,7 +947,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                        conf->mirrors[d].rdev->data_offset;
                mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
                mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO);
+               mbio->bi_rw = WRITE | do_sync;
                mbio->bi_private = r10_bio;
 
                atomic_inc(&r10_bio->remaining);
@@ -1716,7 +1716,7 @@ static void raid10d(mddev_t *mddev)
                                raid_end_bio_io(r10_bio);
                                bio_put(bio);
                        } else {
-                               const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO);
+                               const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
                                bio_put(bio);
                                rdev = conf->mirrors[mirror].rdev;
                                if (printk_ratelimit())
@@ -1730,7 +1730,7 @@ static void raid10d(mddev_t *mddev)
                                bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr
                                        + rdev->data_offset;
                                bio->bi_bdev = rdev->bdev;
-                               bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO);
+                               bio->bi_rw = READ | do_sync;
                                bio->bi_private = r10_bio;
                                bio->bi_end_io = raid10_end_read_request;
                                unplug = 1;
index 96c690279fc6b58c7147c5860bf16417baa2149e..20ac2f14376a1417552982697cde2b303af7afd2 100644 (file)
@@ -3958,7 +3958,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
        const int rw = bio_data_dir(bi);
        int remaining;
 
-       if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
+       if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) {
                /* Drain all pending writes.  We only really need
                 * to ensure they have been submitted, but this is
                 * easier.
index ee4b6914667f9cb2a55c9f20f493bfb971357a9d..fda4de3440c4640f6754a1a66d143aeb5da8936c 100644 (file)
@@ -716,7 +716,7 @@ static int _osd_req_list_objects(struct osd_request *or,
                return PTR_ERR(bio);
        }
 
-       bio->bi_rw &= ~(1 << BIO_RW);
+       bio->bi_rw &= ~REQ_WRITE;
        or->in.bio = bio;
        or->in.total_bytes = bio->bi_size;
        return 0;
@@ -814,7 +814,7 @@ void osd_req_write(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
        WARN_ON(or->out.bio || or->out.total_bytes);
-       WARN_ON(0 ==  bio_rw_flagged(bio, BIO_RW));
+       WARN_ON(0 == (bio->bi_rw & REQ_WRITE));
        or->out.bio = bio;
        or->out.total_bytes = len;
 }
@@ -829,7 +829,7 @@ int osd_req_write_kern(struct osd_request *or,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+       bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
        osd_req_write(or, obj, offset, bio, len);
        return 0;
 }
@@ -865,7 +865,7 @@ void osd_req_read(struct osd_request *or,
 {
        _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
+       WARN_ON(1 == (bio->bi_rw & REQ_WRITE));
        or->in.bio = bio;
        or->in.total_bytes = len;
 }
index e7bf6ca64dcf028caec480f4ecb82b2ff32d2463..8abb2dfb2e7c8b006f9c80b716808a1ff07a0189 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -843,7 +843,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
        if (!bio)
                goto out_bmd;
 
-       bio->bi_rw |= (!write_to_vm << BIO_RW);
+       if (!write_to_vm)
+               bio->bi_rw |= REQ_WRITE;
 
        ret = 0;
 
@@ -1024,7 +1025,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
         * set data direction, and check if mapped pages need bouncing
         */
        if (!write_to_vm)
-               bio->bi_rw |= (1 << BIO_RW);
+               bio->bi_rw |= REQ_WRITE;
 
        bio->bi_bdev = bdev;
        bio->bi_flags |= (1 << BIO_USER_MAPPED);
index 34f7c375567e38eef3f83843baf0bf2272282008..64f10082f0484274e2987f293a4d6f4d1e313684 100644 (file)
@@ -480,7 +480,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;
 
-       if (bio->bi_rw & (1 << BIO_RW)) {
+       if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
@@ -604,7 +604,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
 
        atomic_inc(&fs_info->nr_async_submits);
 
-       if (rw & (1 << BIO_RW_SYNCIO))
+       if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);
 
        btrfs_queue_worker(&fs_info->workers, &async->work);
@@ -668,7 +668,7 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                          bio, 1);
        BUG_ON(ret);
 
-       if (!(rw & (1 << BIO_RW))) {
+       if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
@@ -1427,7 +1427,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
-       if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata &&
+       if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
            !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_meta_workers,
                                   &end_io_wq->work);
index 1bff92ad474434a535b711abebb14d7a692cabca..e975d7180a88245db56830044df334fadb926c65 100644 (file)
@@ -1429,7 +1429,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
        BUG_ON(ret);
 
-       if (!(rw & (1 << BIO_RW))) {
+       if (!(rw & REQ_WRITE)) {
                if (bio_flags & EXTENT_BIO_COMPRESSED) {
                        return btrfs_submit_compressed_read(inode, bio,
                                                    mirror_num, bio_flags);
@@ -1841,7 +1841,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
        bio->bi_size = 0;
 
        bio_add_page(bio, page, failrec->len, start - page_offset(page));
-       if (failed_bio->bi_rw & (1 << BIO_RW))
+       if (failed_bio->bi_rw & REQ_WRITE)
                rw = WRITE;
        else
                rw = READ;
@@ -5642,7 +5642,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
        struct bio_vec *bvec = bio->bi_io_vec;
        u64 start;
        int skip_sum;
-       int write = rw & (1 << BIO_RW);
+       int write = rw & REQ_WRITE;
        int ret = 0;
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
index d6e3af8be95b9a1509b6b61f29ae8841ce52af31..dd318ff280b22a18167f65e538d5215b4b03e3c5 100644 (file)
@@ -258,7 +258,7 @@ loop_lock:
 
                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
-               if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
+               if (cur->bi_rw & REQ_SYNC)
                        num_sync_run++;
 
                submit_bio(cur->bi_rw, cur);
@@ -2651,7 +2651,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        int max_errors = 0;
        struct btrfs_multi_bio *multi = NULL;
 
-       if (multi_ret && !(rw & (1 << BIO_RW)))
+       if (multi_ret && !(rw & REQ_WRITE))
                stripes_allocated = 1;
 again:
        if (multi_ret) {
@@ -2687,7 +2687,7 @@ again:
                mirror_num = 0;
 
        /* if our multi bio struct is too small, back off and try again */
-       if (rw & (1 << BIO_RW)) {
+       if (rw & REQ_WRITE) {
                if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
                                 BTRFS_BLOCK_GROUP_DUP)) {
                        stripes_required = map->num_stripes;
@@ -2697,7 +2697,7 @@ again:
                        max_errors = 1;
                }
        }
-       if (multi_ret && (rw & (1 << BIO_RW)) &&
+       if (multi_ret && (rw & REQ_WRITE) &&
            stripes_allocated < stripes_required) {
                stripes_allocated = map->num_stripes;
                free_extent_map(em);
@@ -2733,7 +2733,7 @@ again:
        num_stripes = 1;
        stripe_index = 0;
        if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
-               if (unplug_page || (rw & (1 << BIO_RW)))
+               if (unplug_page || (rw & REQ_WRITE))
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
@@ -2744,7 +2744,7 @@ again:
                }
 
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-               if (rw & (1 << BIO_RW))
+               if (rw & REQ_WRITE)
                        num_stripes = map->num_stripes;
                else if (mirror_num)
                        stripe_index = mirror_num - 1;
@@ -2755,7 +2755,7 @@ again:
                stripe_index = do_div(stripe_nr, factor);
                stripe_index *= map->sub_stripes;
 
-               if (unplug_page || (rw & (1 << BIO_RW)))
+               if (unplug_page || (rw & REQ_WRITE))
                        num_stripes = map->sub_stripes;
                else if (mirror_num)
                        stripe_index += mirror_num - 1;
@@ -2945,7 +2945,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        struct btrfs_pending_bios *pending_bios;
 
        /* don't bother with additional async steps for reads, right now */
-       if (!(rw & (1 << BIO_RW))) {
+       if (!(rw & REQ_WRITE)) {
                bio_get(bio);
                submit_bio(rw, bio);
                bio_put(bio);
@@ -2964,7 +2964,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        bio->bi_rw |= rw;
 
        spin_lock(&device->io_lock);
-       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
+       if (bio->bi_rw & REQ_SYNC)
                pending_bios = &device->pending_sync_bios;
        else
                pending_bios = &device->pending_bios;
index 4337cad7777b13f98c1d428942387d61a8503c4e..e2732203fa9373c2443dcff83fce7b0e4af516bc 100644 (file)
@@ -599,7 +599,7 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
                        } else {
                                bio = master_dev->bio;
                                /* FIXME: bio_set_dir() */
-                               bio->bi_rw |= (1 << BIO_RW);
+                               bio->bi_rw |= REQ_WRITE;
                        }
 
                        osd_req_write(or, &ios->obj, per_dev->offset, bio,
index efc3539ac5a1f2870779522227d40a23c07a0b20..cde1248a62255ae9bdb03b4c2757a71e973fa089 100644 (file)
@@ -595,7 +595,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
                goto skip_barrier;
        get_bh(bh);
-       submit_bh(WRITE_BARRIER | (1 << BIO_RW_META), bh);
+       submit_bh(WRITE_BARRIER | REQ_META, bh);
        wait_on_buffer(bh);
        if (buffer_eopnotsupp(bh)) {
                clear_buffer_eopnotsupp(bh);
@@ -605,7 +605,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags, int pull)
                lock_buffer(bh);
 skip_barrier:
                get_bh(bh);
-               submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh);
+               submit_bh(WRITE_SYNC | REQ_META, bh);
                wait_on_buffer(bh);
        }
        if (!buffer_uptodate(bh))
index 18176d0b75d775901268e038f621765a56d54752..f3b071f921aa6ea723e35b483675d4c5ee4151c8 100644 (file)
@@ -36,8 +36,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
 {
        struct buffer_head *bh, *head;
        int nr_underway = 0;
-       int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
-                       WRITE_SYNC_PLUG : WRITE));
+       int write_op = REQ_META |
+               (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
 
        BUG_ON(!PageLocked(page));
        BUG_ON(!page_has_buffers(page));
@@ -225,7 +225,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
        }
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
-       submit_bh(READ_SYNC | (1 << BIO_RW_META), bh);
+       submit_bh(READ_SYNC | REQ_META, bh);
        if (!(flags & DIO_WAIT))
                return 0;
 
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
        if (buffer_uptodate(first_bh))
                goto out;
        if (!buffer_locked(first_bh))
-               ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh);
+               ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
 
        dblock++;
        extlen--;
index 3593b3a7290e874b1d3d6a2238a2a83567685b04..fd4f8946abf504966ecf1e700eea8e1ff84641a7 100644 (file)
@@ -275,7 +275,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector)
 
        bio->bi_end_io = end_bio_io_page;
        bio->bi_private = page;
-       submit_bio(READ_SYNC | (1 << BIO_RW_META), bio);
+       submit_bio(READ_SYNC | REQ_META, bio);
        wait_on_page_locked(page);
        bio_put(bio);
        if (!PageUptodate(page)) {
index 2e6a2723b8fa56e3c65e3a83af3aa346184ce510..4588fb9e93df70a01c9bd379644c4a18a7560b42 100644 (file)
@@ -508,7 +508,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
                 * Last BIO is always sent through the following
                 * submission.
                 */
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
                res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
        }
 
index 7fc5606e6ea584d71fc1104f97b1a7e479c640bd..4d379c8250aea32d9fd21b0331a3aab9d9e85923 100644 (file)
@@ -138,55 +138,83 @@ struct bio {
 #define BIO_POOL_IDX(bio)      ((bio)->bi_flags >> BIO_POOL_OFFSET)    
 
 /*
- * bio bi_rw flags
- *
- * bit 0 -- data direction
- *     If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- fail fast device errors
- * bit 2 -- fail fast transport errors
- * bit 3 -- fail fast driver errors
- * bit 4 -- rw-ahead when set
- * bit 5 -- barrier
- *     Insert a serialization point in the IO queue, forcing previously
- *     submitted IO to be completed before this one is issued.
- * bit 6 -- synchronous I/O hint.
- * bit 7 -- Unplug the device immediately after submitting this bio.
- * bit 8 -- metadata request
- *     Used for tracing to differentiate metadata and data IO. May also
- *     get some preferential treatment in the IO scheduler
- * bit 9 -- discard sectors
- *     Informs the lower level device that this range of sectors is no longer
- *     used by the file system and may thus be freed by the device. Used
- *     for flash based storage.
- *     Don't want driver retries for any fast fail whatever the reason.
- * bit 10 -- Tell the IO scheduler not to wait for more requests after this
-       one has been submitted, even if it is a SYNC request.
+ * Request flags.  For use in the cmd_flags field of struct request, and in
+ * bi_rw of struct bio.  Note that some flags are only valid in either one.
  */
-enum bio_rw_flags {
-       BIO_RW,
-       BIO_RW_FAILFAST_DEV,
-       BIO_RW_FAILFAST_TRANSPORT,
-       BIO_RW_FAILFAST_DRIVER,
-       /* above flags must match REQ_* */
-       BIO_RW_AHEAD,
-       BIO_RW_BARRIER,
-       BIO_RW_SYNCIO,
-       BIO_RW_UNPLUG,
-       BIO_RW_META,
-       BIO_RW_DISCARD,
-       BIO_RW_NOIDLE,
+enum rq_flag_bits {
+       /* common flags */
+       __REQ_WRITE,            /* not set, read. set, write */
+       __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
+       __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+       __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
+
+       __REQ_HARDBARRIER,      /* may not be passed by drive either */
+       __REQ_SYNC,             /* request is sync (sync write or read) */
+       __REQ_META,             /* metadata io request */
+       __REQ_DISCARD,          /* request to discard sectors */
+       __REQ_NOIDLE,           /* don't anticipate more IO after this one */
+
+       /* bio only flags */
+       __REQ_UNPLUG,           /* unplug the device immediately after submission */
+       __REQ_RAHEAD,           /* read ahead, can fail anytime */
+
+       /* request only flags */
+       __REQ_SORTED,           /* elevator knows about this request */
+       __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
+       __REQ_FUA,              /* forced unit access */
+       __REQ_NOMERGE,          /* don't touch this for merging */
+       __REQ_STARTED,          /* drive already may have started this one */
+       __REQ_DONTPREP,         /* don't call prep for this one */
+       __REQ_QUEUED,           /* uses queueing */
+       __REQ_ELVPRIV,          /* elevator private data attached */
+       __REQ_FAILED,           /* set if the request failed */
+       __REQ_QUIET,            /* don't worry about errors */
+       __REQ_PREEMPT,          /* set for "ide_preempt" requests */
+       __REQ_ORDERED_COLOR,    /* is before or after barrier */
+       __REQ_ALLOCED,          /* request came from our alloc pool */
+       __REQ_COPY_USER,        /* contains copies of user pages */
+       __REQ_INTEGRITY,        /* integrity metadata has been remapped */
+       __REQ_IO_STAT,          /* account I/O stat */
+       __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
+       __REQ_NR_BITS,          /* stops here */
 };
 
-/*
- * First four bits must match between bio->bi_rw and rq->cmd_flags, make
- * that explicit here.
- */
-#define BIO_RW_RQ_MASK         0xf
-
-static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
-{
-       return (bio->bi_rw & (1 << flag)) != 0;
-}
+#define REQ_WRITE              (1 << __REQ_WRITE)
+#define REQ_FAILFAST_DEV       (1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER    (1 << __REQ_FAILFAST_DRIVER)
+#define REQ_HARDBARRIER                (1 << __REQ_HARDBARRIER)
+#define REQ_SYNC               (1 << __REQ_SYNC)
+#define REQ_META               (1 << __REQ_META)
+#define REQ_DISCARD            (1 << __REQ_DISCARD)
+#define REQ_NOIDLE             (1 << __REQ_NOIDLE)
+
+#define REQ_FAILFAST_MASK \
+       (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
+#define REQ_COMMON_MASK \
+       (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
+        REQ_META | REQ_DISCARD | REQ_NOIDLE)
+
+#define REQ_UNPLUG             (1 << __REQ_UNPLUG)
+#define REQ_RAHEAD             (1 << __REQ_RAHEAD)
+
+#define REQ_SORTED             (1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER                (1 << __REQ_SOFTBARRIER)
+#define REQ_FUA                        (1 << __REQ_FUA)
+#define REQ_NOMERGE            (1 << __REQ_NOMERGE)
+#define REQ_STARTED            (1 << __REQ_STARTED)
+#define REQ_DONTPREP           (1 << __REQ_DONTPREP)
+#define REQ_QUEUED             (1 << __REQ_QUEUED)
+#define REQ_ELVPRIV            (1 << __REQ_ELVPRIV)
+#define REQ_FAILED             (1 << __REQ_FAILED)
+#define REQ_QUIET              (1 << __REQ_QUIET)
+#define REQ_PREEMPT            (1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR      (1 << __REQ_ORDERED_COLOR)
+#define REQ_ALLOCED            (1 << __REQ_ALLOCED)
+#define REQ_COPY_USER          (1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY          (1 << __REQ_INTEGRITY)
+#define REQ_IO_STAT            (1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE                (1 << __REQ_MIXED_MERGE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -211,7 +239,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)                bio_iovec((bio))->bv_offset
 #define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) \
+       ((bio->bi_rw & REQ_HARDBARRIER) && \
+        !bio_has_data(bio) && \
+        !(bio->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
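
With bio_rw_flagged() and the BIO_RW_* names gone, callers test bi_rw directly against the shared REQ_* masks. A minimal sketch of the new idiom (the helper name is made up for illustration):

	/* Illustrative only: the open-coded test that replaces bio_rw_flagged(). */
	static inline bool example_bio_is_sync_write(struct bio *bio)
	{
		/* was: bio_rw_flagged(bio, BIO_RW_SYNCIO) && bio_data_dir(bio) == WRITE */
		return (bio->bi_rw & REQ_SYNC) && (bio->bi_rw & REQ_WRITE);
	}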
index 3ecd28ef9ba44c1903c0baf06691d76acec5af94..3fc0f5908619e0b3fc878bbcec4b352faf1fbca7 100644 (file)
@@ -84,70 +84,6 @@ enum {
        REQ_LB_OP_FLUSH = 0x41,         /* flush request */
 };
 
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-       __REQ_RW,               /* not set, read. set, write */
-       __REQ_FAILFAST_DEV,     /* no driver retries of device errors */
-       __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-       __REQ_FAILFAST_DRIVER,  /* no driver retries of driver errors */
-       /* above flags must match BIO_RW_* */
-       __REQ_DISCARD,          /* request to discard sectors */
-       __REQ_SORTED,           /* elevator knows about this request */
-       __REQ_SOFTBARRIER,      /* may not be passed by ioscheduler */
-       __REQ_HARDBARRIER,      /* may not be passed by drive either */
-       __REQ_FUA,              /* forced unit access */
-       __REQ_NOMERGE,          /* don't touch this for merging */
-       __REQ_STARTED,          /* drive already may have started this one */
-       __REQ_DONTPREP,         /* don't call prep for this one */
-       __REQ_QUEUED,           /* uses queueing */
-       __REQ_ELVPRIV,          /* elevator private data attached */
-       __REQ_FAILED,           /* set if the request failed */
-       __REQ_QUIET,            /* don't worry about errors */
-       __REQ_PREEMPT,          /* set for "ide_preempt" requests */
-       __REQ_ORDERED_COLOR,    /* is before or after barrier */
-       __REQ_RW_SYNC,          /* request is sync (sync write or read) */
-       __REQ_ALLOCED,          /* request came from our alloc pool */
-       __REQ_RW_META,          /* metadata io request */
-       __REQ_COPY_USER,        /* contains copies of user pages */
-       __REQ_INTEGRITY,        /* integrity metadata has been remapped */
-       __REQ_NOIDLE,           /* Don't anticipate more IO after this one */
-       __REQ_IO_STAT,          /* account I/O stat */
-       __REQ_MIXED_MERGE,      /* merge of different types, fail separately */
-       __REQ_NR_BITS,          /* stops here */
-};
-
-#define REQ_RW         (1 << __REQ_RW)
-#define REQ_FAILFAST_DEV       (1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER    (1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD    (1 << __REQ_DISCARD)
-#define REQ_SORTED     (1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER        (1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER        (1 << __REQ_HARDBARRIER)
-#define REQ_FUA                (1 << __REQ_FUA)
-#define REQ_NOMERGE    (1 << __REQ_NOMERGE)
-#define REQ_STARTED    (1 << __REQ_STARTED)
-#define REQ_DONTPREP   (1 << __REQ_DONTPREP)
-#define REQ_QUEUED     (1 << __REQ_QUEUED)
-#define REQ_ELVPRIV    (1 << __REQ_ELVPRIV)
-#define REQ_FAILED     (1 << __REQ_FAILED)
-#define REQ_QUIET      (1 << __REQ_QUIET)
-#define REQ_PREEMPT    (1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR      (1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC    (1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED    (1 << __REQ_ALLOCED)
-#define REQ_RW_META    (1 << __REQ_RW_META)
-#define REQ_COPY_USER  (1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY  (1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE     (1 << __REQ_NOIDLE)
-#define REQ_IO_STAT    (1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE        (1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK      (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-                                REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB    16
 
 /*
@@ -631,7 +567,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-       return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+       return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
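
rw_is_sync() keeps its meaning under the new names: reads always count as synchronous, writes only when REQ_SYNC is set. A hedged sketch of a caller, e.g. an I/O scheduler deciding whether to idle; the helper is illustrative, not part of the patch:

	/* Illustrative only: combine the sync and noidle semantics. */
	static bool example_should_idle_after(struct request *rq)
	{
		/* rq_is_sync(): READ, or WRITE with REQ_SYNC set */
		return rq_is_sync(rq) && !(rq->cmd_flags & REQ_NOIDLE);
	}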
index 5988788314978c426623ed9611516e8fe43901a0..c5c92943c76711a1cca4c7711d502a991a3e8762 100644 (file)
@@ -144,29 +144,31 @@ struct inodes_stat_t {
  *                     of this IO.
  *
  */
-#define RW_MASK                1
-#define RWA_MASK       2
-#define READ 0
-#define WRITE 1
-#define READA 2                /* read-ahead  - don't block if no resources */
-#define SWRITE 3       /* for ll_rw_block() - wait for buffer lock */
-#define READ_SYNC      (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define READ_META      (READ | (1 << BIO_RW_META))
-#define WRITE_SYNC_PLUG        (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define WRITE_SYNC     (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_ODIRECT_PLUG     (WRITE | (1 << BIO_RW_SYNCIO))
-#define WRITE_META     (WRITE | (1 << BIO_RW_META))
-#define SWRITE_SYNC_PLUG       \
-                       (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define SWRITE_SYNC    (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_BARRIER  (WRITE_SYNC | (1 << BIO_RW_BARRIER))
+#define RW_MASK                        1
+#define RWA_MASK               2
+
+#define READ                   0
+#define WRITE                  1
+#define READA                  2 /* readahead  - don't block if no resources */
+#define SWRITE                 3 /* for ll_rw_block() - wait for buffer lock */
+
+#define READ_SYNC              (READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_META              (READ | REQ_META)
+#define WRITE_SYNC_PLUG                (WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_SYNC             (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
+#define WRITE_ODIRECT_PLUG     (WRITE | REQ_SYNC)
+#define WRITE_META             (WRITE | REQ_META)
+#define WRITE_BARRIER          (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+                                REQ_HARDBARRIER)
+#define SWRITE_SYNC_PLUG       (SWRITE | REQ_SYNC | REQ_NOIDLE)
+#define SWRITE_SYNC            (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
  * parts of device that are now unused by the file system.
  */
-#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD))
-#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER))
+#define DISCARD_NOBARRIER      (WRITE | REQ_DISCARD)
+#define DISCARD_BARRIER                (WRITE | REQ_DISCARD | REQ_HARDBARRIER)
 
 #define SEL_IN         1
 #define SEL_OUT                2
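
The composite constants keep their old expansions, just spelled with REQ_* bits, so existing callers pass them to submit_bio() unchanged. A hypothetical write path, for illustration only:

	/*
	 * Illustrative only: WRITE_SYNC expands to WRITE | REQ_SYNC |
	 * REQ_NOIDLE | REQ_UNPLUG, and WRITE_BARRIER adds REQ_HARDBARRIER
	 * on top of that.
	 */
	static void example_write_sync(struct bio *bio, bool need_barrier)
	{
		submit_bio(need_barrier ? WRITE_BARRIER : WRITE_SYNC, bio);
	}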
index 97024fd40cd5f684c0abfe0bfa8118389a0175b3..83bbc7c02df95fd3560bdac7340cc3804d167d38 100644 (file)
@@ -28,7 +28,7 @@
 static int submit(int rw, struct block_device *bdev, sector_t sector,
                struct page *page, struct bio **bio_chain)
 {
-       const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+       const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG;
        struct bio *bio;
 
        bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
index 4f149944cb89eb04980866a773974ea7db5b067b..3b4a695051b603fa74cb4f33b37e0e032bbcac46 100644 (file)
@@ -169,9 +169,12 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                 BLK_TC_ACT(BLK_TC_WRITE) };
 
+#define BLK_TC_HARDBARRIER     BLK_TC_BARRIER
+#define BLK_TC_RAHEAD          BLK_TC_AHEAD
+
 /* The ilog2() calls fall out because they're constant */
-#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
-         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
+#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
+         (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
 
 /*
  * The worker for the various blk_add_trace*() types. Fills out a
@@ -194,9 +197,9 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                return;
 
        what |= ddir_act[rw & WRITE];
-       what |= MASK_TC_BIT(rw, BARRIER);
-       what |= MASK_TC_BIT(rw, SYNCIO);
-       what |= MASK_TC_BIT(rw, AHEAD);
+       what |= MASK_TC_BIT(rw, HARDBARRIER);
+       what |= MASK_TC_BIT(rw, SYNC);
+       what |= MASK_TC_BIT(rw, RAHEAD);
        what |= MASK_TC_BIT(rw, META);
        what |= MASK_TC_BIT(rw, DISCARD);
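
MASK_TC_BIT() works because each REQ_* bit can be moved onto its BLK_TC_* trace category with a constant shift. A standalone sketch of the arithmetic for REQ_SYNC; the BLK_TC_SYNC and BLK_TC_SHIFT values are assumptions taken from blktrace_api.h, not from this patch:

	#include <assert.h>

	#define __REQ_SYNC	5			/* bit position from bio.h above */
	#define REQ_SYNC	(1U << __REQ_SYNC)
	#define BLK_TC_SYNC	(1U << 3)		/* assumed, from blktrace_api.h */
	#define BLK_TC_SHIFT	16			/* assumed, from blktrace_api.h */
	#define BLK_TC_ACT(act)	((act) << BLK_TC_SHIFT)

	int main(void)
	{
		unsigned int rw = REQ_SYNC;
		/* ilog2(BLK_TC_SYNC) == 3, so the shift is 3 + 16 - 5 = 14 */
		unsigned int what = (rw & REQ_SYNC) << (3 + BLK_TC_SHIFT - __REQ_SYNC);

		assert(what == BLK_TC_ACT(BLK_TC_SYNC));
		return 0;
	}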
 
@@ -662,7 +665,7 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
                return;
 
        if (rq->cmd_flags & REQ_DISCARD)
-               rw |= (1 << BIO_RW_DISCARD);
+               rw |= REQ_DISCARD;
 
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                what |= BLK_TC_ACT(BLK_TC_PC);
@@ -1755,20 +1758,20 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
        if (rw & WRITE)
                rwbs[i++] = 'W';
-       else if (rw & 1 << BIO_RW_DISCARD)
+       else if (rw & REQ_DISCARD)
                rwbs[i++] = 'D';
        else if (bytes)
                rwbs[i++] = 'R';
        else
                rwbs[i++] = 'N';
 
-       if (rw & 1 << BIO_RW_AHEAD)
+       if (rw & REQ_RAHEAD)
                rwbs[i++] = 'A';
-       if (rw & 1 << BIO_RW_BARRIER)
+       if (rw & REQ_HARDBARRIER)
                rwbs[i++] = 'B';
-       if (rw & 1 << BIO_RW_SYNCIO)
+       if (rw & REQ_SYNC)
                rwbs[i++] = 'S';
-       if (rw & 1 << BIO_RW_META)
+       if (rw & REQ_META)
                rwbs[i++] = 'M';
 
        rwbs[i] = '\0';
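
A hedged expectation of what the new masks produce in blk_fill_rwbs(), illustrative rather than taken from the patch:

	/* Illustrative only: expected rwbs strings under the new masks. */
	static void example_rwbs_check(void)
	{
		char rwbs[8];

		blk_fill_rwbs(rwbs, WRITE | REQ_SYNC, 4096);	/* "WS" */
		blk_fill_rwbs(rwbs, READ | REQ_META, 4096);	/* "RM" */
		blk_fill_rwbs(rwbs, REQ_DISCARD, 0);		/* "D"  */
	}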
@@ -1780,7 +1783,7 @@ void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
        int bytes;
 
        if (rq->cmd_flags & REQ_DISCARD)
-               rw |= (1 << BIO_RW_DISCARD);
+               rw |= REQ_DISCARD;
 
        bytes = blk_rq_bytes(rq);
 
index 31a3b962230a930cecab5d223f43d6d790f2610a..2dee975bf469003dd02e2f318399c16a428e4937 100644 (file)
@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
                goto out;
        }
        if (wbc->sync_mode == WB_SYNC_ALL)
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
        count_vm_event(PSWPOUT);
        set_page_writeback(page);
        unlock_page(page);