block: kill merge_bvec_fn() completely
author: Kent Overstreet <kent.overstreet@gmail.com>
Tue, 28 Apr 2015 06:48:34 +0000 (23:48 -0700)
committer: Jens Axboe <axboe@fb.com>
Thu, 13 Aug 2015 18:31:57 +0000 (12:31 -0600)
As generic_make_request() is now able to handle arbitrarily sized bios,
it's no longer necessary for each individual block driver to define its
own ->merge_bvec_fn() callback. Remove every invocation completely.

Cc: Jens Axboe <axboe@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: drbd-user@lists.linbit.com
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@kernel.org>
Cc: ceph-devel@vger.kernel.org
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Neil Brown <neilb@suse.de>
Cc: linux-raid@vger.kernel.org
Cc: Christoph Hellwig <hch@infradead.org>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Acked-by: NeilBrown <neilb@suse.de> (for the 'md' bits)
Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
[dpark: also remove ->merge_bvec_fn() in dm-thin as well as
 dm-era-target, and resolve merge conflicts]
Signed-off-by: Dongsu Park <dpark@posteo.net>
Signed-off-by: Ming Lin <ming.l@ssi.samsung.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
32 files changed:
block/blk-merge.c
block/blk-settings.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_req.c
drivers/block/pktcdvd.c
drivers/block/rbd.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-era-target.c
drivers/md/dm-flakey.c
drivers/md/dm-linear.c
drivers/md/dm-log-writes.c
drivers/md/dm-raid.c
drivers/md/dm-snap.c
drivers/md/dm-stripe.c
drivers/md/dm-table.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/linear.c
drivers/md/md.c
drivers/md/md.h
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid0.h
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
include/linux/blkdev.h
include/linux/device-mapper.h

index d9c3a75e4a60a677f3d0aa4c0f1df19fad029ae1..0027def35f5a102fd7d031b5d5ed8cdcbaa66f0b 100644 (file)
@@ -69,24 +69,13 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        struct bio *split;
        struct bio_vec bv, bvprv;
        struct bvec_iter iter;
-       unsigned seg_size = 0, nsegs = 0;
+       unsigned seg_size = 0, nsegs = 0, sectors = 0;
        int prev = 0;
 
-       struct bvec_merge_data bvm = {
-               .bi_bdev        = bio->bi_bdev,
-               .bi_sector      = bio->bi_iter.bi_sector,
-               .bi_size        = 0,
-               .bi_rw          = bio->bi_rw,
-       };
-
        bio_for_each_segment(bv, bio, iter) {
-               if (q->merge_bvec_fn &&
-                   q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
-                       goto split;
-
-               bvm.bi_size += bv.bv_len;
+               sectors += bv.bv_len >> 9;
 
-               if (bvm.bi_size >> 9 > queue_max_sectors(q))
+               if (sectors > queue_max_sectors(q))
                        goto split;
 
                /*
index b38d8d723276254dcb4bb2d582f7ae35ec672cfc..9df73991b23175d5390470b1501c0e3f862cfff6 100644 (file)
@@ -53,28 +53,6 @@ void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
 }
 EXPORT_SYMBOL(blk_queue_unprep_rq);
 
-/**
- * blk_queue_merge_bvec - set a merge_bvec function for queue
- * @q:         queue
- * @mbfn:      merge_bvec_fn
- *
- * Usually queues have static limitations on the max sectors or segments that
- * we can put in a request. Stacking drivers may have some settings that
- * are dynamic, and thus we have to query the queue whether it is ok to
- * add a new bio_vec to a bio at a given offset or not. If the block device
- * has such limitations, it needs to register a merge_bvec_fn to control
- * the size of bio's sent to it. Note that a block device *must* allow a
- * single page to be added to an empty bio. The block device driver may want
- * to use the bio_split() function to deal with these bio's. By default
- * no merge_bvec_fn is defined for a queue, and only the fixed limits are
- * honored.
- */
-void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
-{
-       q->merge_bvec_fn = mbfn;
-}
-EXPORT_SYMBOL(blk_queue_merge_bvec);
-
 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
 {
        q->softirq_done_fn = fn;
index a08c4a9179f18ed797a79b2556a5f7125a5651b9..015c6e91b75683c000f9342b00e7898abb6be8df 100644 (file)
@@ -1450,7 +1450,6 @@ extern void do_submit(struct work_struct *ws);
 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
 extern void drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
-extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 
 
index a1518539b8580420ea1d278c1daeeeb3e3721ae8..74d97f4bac3488ac767ef97b20dfb3e2277e50cf 100644 (file)
@@ -2774,7 +2774,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
           This triggers a max_bio_size message upon first attach or connect */
        blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
-       blk_queue_merge_bvec(q, drbd_merge_bvec);
        q->queue_lock = &resource->req_lock;
 
        device->md_io.page = alloc_page(GFP_KERNEL);
index 923c857b395b35ee4e5de2563a1f7e44781d091a..211592682169656ef4ea66f3def62ac45000b3eb 100644 (file)
@@ -1512,41 +1512,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
        __drbd_make_request(device, bio, start_jif);
 }
 
-/* This is called by bio_add_page().
- *
- * q->max_hw_sectors and other global limits are already enforced there.
- *
- * We need to call down to our lower level device,
- * in case it has special restrictions.
- *
- * We also may need to enforce configured max-bio-bvecs limits.
- *
- * As long as the BIO is empty we have to allow at least one bvec,
- * regardless of size and offset, so no need to ask lower levels.
- */
-int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
-{
-       struct drbd_device *device = (struct drbd_device *) q->queuedata;
-       unsigned int bio_size = bvm->bi_size;
-       int limit = DRBD_MAX_BIO_SIZE;
-       int backing_limit;
-
-       if (bio_size && get_ldev(device)) {
-               unsigned int max_hw_sectors = queue_max_hw_sectors(q);
-               struct request_queue * const b =
-                       device->ldev->backing_bdev->bd_disk->queue;
-               if (b->merge_bvec_fn) {
-                       bvm->bi_bdev = device->ldev->backing_bdev;
-                       backing_limit = b->merge_bvec_fn(b, bvm, bvec);
-                       limit = min(limit, backing_limit);
-               }
-               put_ldev(device);
-               if ((limit >> 9) > max_hw_sectors)
-                       limit = max_hw_sectors << 9;
-       }
-       return limit;
-}
-
 void request_timer_fn(unsigned long data)
 {
        struct drbd_device *device = (struct drbd_device *) data;
index ee7ad5e44632ebf8a703142ab53548cb93ae76a3..7be2375db7f2630e8412bd2c21819ae35a6c8da0 100644 (file)
@@ -2506,26 +2506,6 @@ end_io:
 
 
 
-static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
-                         struct bio_vec *bvec)
-{
-       struct pktcdvd_device *pd = q->queuedata;
-       sector_t zone = get_zone(bmd->bi_sector, pd);
-       int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
-       int remaining = (pd->settings.size << 9) - used;
-       int remaining2;
-
-       /*
-        * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
-        * boundary, pkt_make_request() will split the bio.
-        */
-       remaining2 = PAGE_SIZE - bmd->bi_size;
-       remaining = max(remaining, remaining2);
-
-       BUG_ON(remaining < 0);
-       return remaining;
-}
-
 static void pkt_init_queue(struct pktcdvd_device *pd)
 {
        struct request_queue *q = pd->disk->queue;
@@ -2533,7 +2513,6 @@ static void pkt_init_queue(struct pktcdvd_device *pd)
        blk_queue_make_request(q, pkt_make_request);
        blk_queue_logical_block_size(q, CD_FRAMESIZE);
        blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
-       blk_queue_merge_bvec(q, pkt_merge_bvec);
        q->queuedata = pd;
 }
 
index dcc86937f55ca323184c89d27db2e25d60c59493..71dd061a7e11154e1c044e009c73de734cf389da 100644 (file)
@@ -3462,52 +3462,6 @@ static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
        return BLK_MQ_RQ_QUEUE_OK;
 }
 
-/*
- * a queue callback. Makes sure that we don't create a bio that spans across
- * multiple osd objects. One exception would be with a single page bios,
- * which we handle later at bio_chain_clone_range()
- */
-static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
-                         struct bio_vec *bvec)
-{
-       struct rbd_device *rbd_dev = q->queuedata;
-       sector_t sector_offset;
-       sector_t sectors_per_obj;
-       sector_t obj_sector_offset;
-       int ret;
-
-       /*
-        * Find how far into its rbd object the partition-relative
-        * bio start sector is to offset relative to the enclosing
-        * device.
-        */
-       sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
-       sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
-       obj_sector_offset = sector_offset & (sectors_per_obj - 1);
-
-       /*
-        * Compute the number of bytes from that offset to the end
-        * of the object.  Account for what's already used by the bio.
-        */
-       ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
-       if (ret > bmd->bi_size)
-               ret -= bmd->bi_size;
-       else
-               ret = 0;
-
-       /*
-        * Don't send back more than was asked for.  And if the bio
-        * was empty, let the whole thing through because:  "Note
-        * that a block device *must* allow a single page to be
-        * added to an empty bio."
-        */
-       rbd_assert(bvec->bv_len <= PAGE_SIZE);
-       if (ret > (int) bvec->bv_len || !bmd->bi_size)
-               ret = (int) bvec->bv_len;
-
-       return ret;
-}
-
 static void rbd_free_disk(struct rbd_device *rbd_dev)
 {
        struct gendisk *disk = rbd_dev->disk;
@@ -3806,7 +3760,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
        q->limits.discard_zeroes_data = 1;
 
-       blk_queue_merge_bvec(q, rbd_merge_bvec);
        disk->queue = q;
 
        q->queuedata = rbd_dev;
index 04d0dadc48b1652e6d2bba82f4a4dd319ed412b2..d2b5dfbb30cfb92e92940cfc9098f55527a11399 100644 (file)
@@ -3771,26 +3771,6 @@ static int cache_iterate_devices(struct dm_target *ti,
        return r;
 }
 
-/*
- * We assume I/O is going to the origin (which is the volume
- * more likely to have restrictions e.g. by being striped).
- * (Looking up the exact location of the data would be expensive
- * and could always be out of date by the time the bio is submitted.)
- */
-static int cache_bvec_merge(struct dm_target *ti,
-                           struct bvec_merge_data *bvm,
-                           struct bio_vec *biovec, int max_size)
-{
-       struct cache *cache = ti->private;
-       struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = cache->origin_dev->bdev;
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 {
        /*
@@ -3834,7 +3814,6 @@ static struct target_type cache_target = {
        .status = cache_status,
        .message = cache_message,
        .iterate_devices = cache_iterate_devices,
-       .merge = cache_bvec_merge,
        .io_hints = cache_io_hints,
 };
 
index 59da573cf994719c9eea162fbeb9357ab78c2d0b..ba5c2105f4e62f4d05c755fca131484f7633b6c1 100644 (file)
@@ -2035,21 +2035,6 @@ error:
        return -EINVAL;
 }
 
-static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                      struct bio_vec *biovec, int max_size)
-{
-       struct crypt_config *cc = ti->private;
-       struct request_queue *q = bdev_get_queue(cc->dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = cc->dev->bdev;
-       bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int crypt_iterate_devices(struct dm_target *ti,
                                 iterate_devices_callout_fn fn, void *data)
 {
@@ -2070,7 +2055,6 @@ static struct target_type crypt_target = {
        .preresume = crypt_preresume,
        .resume = crypt_resume,
        .message = crypt_message,
-       .merge  = crypt_merge,
        .iterate_devices = crypt_iterate_devices,
 };
 
index ad913cd4aded33206ce8b79bae7e13d8d593f2fb..0119ebfb3d49b4652bd71548af42037aac22953a 100644 (file)
@@ -1673,20 +1673,6 @@ static int era_iterate_devices(struct dm_target *ti,
        return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
 }
 
-static int era_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                    struct bio_vec *biovec, int max_size)
-{
-       struct era *era = ti->private;
-       struct request_queue *q = bdev_get_queue(era->origin_dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = era->origin_dev->bdev;
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct era *era = ti->private;
@@ -1717,7 +1703,6 @@ static struct target_type era_target = {
        .status = era_status,
        .message = era_message,
        .iterate_devices = era_iterate_devices,
-       .merge = era_merge,
        .io_hints = era_io_hints
 };
 
index 04481247aab866961dc7e7d4f2365dec1191d5a0..afab13bd683e0e7bed2c21846155a0a9ce086e6e 100644 (file)
@@ -387,21 +387,6 @@ static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long ar
        return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                       struct bio_vec *biovec, int max_size)
-{
-       struct flakey_c *fc = ti->private;
-       struct request_queue *q = bdev_get_queue(fc->dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = fc->dev->bdev;
-       bvm->bi_sector = flakey_map_sector(ti, bvm->bi_sector);
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
 {
        struct flakey_c *fc = ti->private;
@@ -419,7 +404,6 @@ static struct target_type flakey_target = {
        .end_io = flakey_end_io,
        .status = flakey_status,
        .ioctl  = flakey_ioctl,
-       .merge  = flakey_merge,
        .iterate_devices = flakey_iterate_devices,
 };
 
index 53e848c1093936560a9554c9fdacbec2f6dae5bd..7dd5fc8e3eeaac8d8d8f148862ecf3c6f298fec2 100644 (file)
@@ -130,21 +130,6 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
        return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                       struct bio_vec *biovec, int max_size)
-{
-       struct linear_c *lc = ti->private;
-       struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = lc->dev->bdev;
-       bvm->bi_sector = linear_map_sector(ti, bvm->bi_sector);
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int linear_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
 {
@@ -162,7 +147,6 @@ static struct target_type linear_target = {
        .map    = linear_map,
        .status = linear_status,
        .ioctl  = linear_ioctl,
-       .merge  = linear_merge,
        .iterate_devices = linear_iterate_devices,
 };
 
index e9d17488d5e321aeeebb9ca8d237f0d546eb8a40..316cc3fb741f972a075fb56888585ef4ada72468 100644 (file)
@@ -725,21 +725,6 @@ static int log_writes_ioctl(struct dm_target *ti, unsigned int cmd,
        return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
 }
 
-static int log_writes_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                           struct bio_vec *biovec, int max_size)
-{
-       struct log_writes_c *lc = ti->private;
-       struct request_queue *q = bdev_get_queue(lc->dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = lc->dev->bdev;
-       bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int log_writes_iterate_devices(struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data)
@@ -793,7 +778,6 @@ static struct target_type log_writes_target = {
        .end_io = normal_end_io,
        .status = log_writes_status,
        .ioctl  = log_writes_ioctl,
-       .merge  = log_writes_merge,
        .message = log_writes_message,
        .iterate_devices = log_writes_iterate_devices,
        .io_hints = log_writes_io_hints,
index 2daa6779351179974f0a706f52de57dd722369ef..97e165183e79f2991f8191913e0b44fb91b00310 100644 (file)
@@ -1717,24 +1717,6 @@ static void raid_resume(struct dm_target *ti)
        mddev_resume(&rs->md);
 }
 
-static int raid_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                     struct bio_vec *biovec, int max_size)
-{
-       struct raid_set *rs = ti->private;
-       struct md_personality *pers = rs->md.pers;
-
-       if (pers && pers->mergeable_bvec)
-               return min(max_size, pers->mergeable_bvec(&rs->md, bvm, biovec));
-
-       /*
-        * In case we can't request the personality because
-        * the raid set is not running yet
-        *
-        * -> return safe minimum
-        */
-       return rs->md.chunk_sectors;
-}
-
 static struct target_type raid_target = {
        .name = "raid",
        .version = {1, 7, 0},
@@ -1749,7 +1731,6 @@ static struct target_type raid_target = {
        .presuspend = raid_presuspend,
        .postsuspend = raid_postsuspend,
        .resume = raid_resume,
-       .merge = raid_merge,
 };
 
 static int __init dm_raid_init(void)
index dd8ca0bb09806ae4b4ddcd43ca90a23a79f97be5..d10b6876018eee6b62ca7f937022d3648c6295c0 100644 (file)
@@ -2330,20 +2330,6 @@ static void origin_status(struct dm_target *ti, status_type_t type,
        }
 }
 
-static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                       struct bio_vec *biovec, int max_size)
-{
-       struct dm_origin *o = ti->private;
-       struct request_queue *q = bdev_get_queue(o->dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = o->dev->bdev;
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
 {
@@ -2362,7 +2348,6 @@ static struct target_type origin_target = {
        .resume  = origin_resume,
        .postsuspend = origin_postsuspend,
        .status  = origin_status,
-       .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
 };
 
index 4f94c7da82f6acb0a87245311363cc49365dccdf..484029db8cba10931a91b0fabfdd58c315904a9d 100644 (file)
@@ -412,26 +412,6 @@ static void stripe_io_hints(struct dm_target *ti,
        blk_limits_io_opt(limits, chunk_size * sc->stripes);
 }
 
-static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                       struct bio_vec *biovec, int max_size)
-{
-       struct stripe_c *sc = ti->private;
-       sector_t bvm_sector = bvm->bi_sector;
-       uint32_t stripe;
-       struct request_queue *q;
-
-       stripe_map_sector(sc, bvm_sector, &stripe, &bvm_sector);
-
-       q = bdev_get_queue(sc->stripe[stripe].dev->bdev);
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = sc->stripe[stripe].dev->bdev;
-       bvm->bi_sector = sc->stripe[stripe].physical_start + bvm_sector;
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static struct target_type stripe_target = {
        .name   = "striped",
        .version = {1, 5, 1},
@@ -443,7 +423,6 @@ static struct target_type stripe_target = {
        .status = stripe_status,
        .iterate_devices = stripe_iterate_devices,
        .io_hints = stripe_io_hints,
-       .merge  = stripe_merge,
 };
 
 int __init dm_stripe_init(void)
index 16ba55ad708992f7e942b2f6ce2048d12be5c1b6..afb4ad3dfeb3b1526e2d87f2b93a71f7765caa98 100644 (file)
@@ -440,14 +440,6 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);
 
-       /*
-        * Check if merge fn is supported.
-        * If not we'll force DM to use PAGE_SIZE or
-        * smaller I/O, just to be safe.
-        */
-       if (dm_queue_merge_is_compulsory(q) && !ti->type->merge)
-               blk_limits_max_hw_sectors(limits,
-                                         (unsigned int) (PAGE_SIZE >> 9));
        return 0;
 }
 
index 2ade2c46dca9e30473775b7f2d9cf0ea163c0857..f352e4990998314f4d6b6128a042168e561b9302 100644 (file)
@@ -3845,20 +3845,6 @@ static int pool_iterate_devices(struct dm_target *ti,
        return fn(ti, pt->data_dev, 0, ti->len, data);
 }
 
-static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                     struct bio_vec *biovec, int max_size)
-{
-       struct pool_c *pt = ti->private;
-       struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = pt->data_dev->bdev;
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct pool_c *pt = ti->private;
@@ -3935,7 +3921,6 @@ static struct target_type pool_target = {
        .resume = pool_resume,
        .message = pool_message,
        .status = pool_status,
-       .merge = pool_merge,
        .iterate_devices = pool_iterate_devices,
        .io_hints = pool_io_hints,
 };
@@ -4262,21 +4247,6 @@ err:
        DMEMIT("Error");
 }
 
-static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                     struct bio_vec *biovec, int max_size)
-{
-       struct thin_c *tc = ti->private;
-       struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = tc->pool_dev->bdev;
-       bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int thin_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
 {
@@ -4320,7 +4290,6 @@ static struct target_type thin_target = {
        .presuspend = thin_presuspend,
        .postsuspend = thin_postsuspend,
        .status = thin_status,
-       .merge = thin_merge,
        .iterate_devices = thin_iterate_devices,
        .io_hints = thin_io_hints,
 };
index 4b34df8fdb5816704c6180322e6ce2d63c5f04f5..c137dcb147b8329015ff1872f09418993eab6df4 100644 (file)
@@ -649,21 +649,6 @@ static int verity_ioctl(struct dm_target *ti, unsigned cmd,
                                     cmd, arg);
 }
 
-static int verity_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
-                       struct bio_vec *biovec, int max_size)
-{
-       struct dm_verity *v = ti->private;
-       struct request_queue *q = bdev_get_queue(v->data_dev->bdev);
-
-       if (!q->merge_bvec_fn)
-               return max_size;
-
-       bvm->bi_bdev = v->data_dev->bdev;
-       bvm->bi_sector = verity_map_sector(v, bvm->bi_sector);
-
-       return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
-}
-
 static int verity_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
 {
@@ -996,7 +981,6 @@ static struct target_type verity_target = {
        .map            = verity_map,
        .status         = verity_status,
        .ioctl          = verity_ioctl,
-       .merge          = verity_merge,
        .iterate_devices = verity_iterate_devices,
        .io_hints       = verity_io_hints,
 };
index 069f8d7e890e6840c599cfafd0e246a7d057a8e1..8bb1ebb6ca7b5ea5836824d6bf281232143cec48 100644 (file)
@@ -124,9 +124,8 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
 #define DMF_FREEING 3
 #define DMF_DELETING 4
 #define DMF_NOFLUSH_SUSPENDING 5
-#define DMF_MERGE_IS_OPTIONAL 6
-#define DMF_DEFERRED_REMOVE 7
-#define DMF_SUSPENDED_INTERNALLY 8
+#define DMF_DEFERRED_REMOVE 6
+#define DMF_SUSPENDED_INTERNALLY 7
 
 /*
  * A dummy definition to make RCU happy.
@@ -1725,67 +1724,6 @@ static void __split_and_process_bio(struct mapped_device *md,
  * CRUD END
  *---------------------------------------------------------------*/
 
-static int dm_merge_bvec(struct request_queue *q,
-                        struct bvec_merge_data *bvm,
-                        struct bio_vec *biovec)
-{
-       struct mapped_device *md = q->queuedata;
-       struct dm_table *map = dm_get_live_table_fast(md);
-       struct dm_target *ti;
-       sector_t max_sectors, max_size = 0;
-
-       if (unlikely(!map))
-               goto out;
-
-       ti = dm_table_find_target(map, bvm->bi_sector);
-       if (!dm_target_is_valid(ti))
-               goto out;
-
-       /*
-        * Find maximum amount of I/O that won't need splitting
-        */
-       max_sectors = min(max_io_len(bvm->bi_sector, ti),
-                         (sector_t) queue_max_sectors(q));
-       max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-
-       /*
-        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
-        * to the targets' merge function since it holds sectors not bytes).
-        * Just doing this as an interim fix for stable@ because the more
-        * comprehensive cleanup of switching to sector_t will impact every
-        * DM target that implements a ->merge hook.
-        */
-       if (max_size > INT_MAX)
-               max_size = INT_MAX;
-
-       /*
-        * merge_bvec_fn() returns number of bytes
-        * it can accept at this offset
-        * max is precomputed maximal io size
-        */
-       if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
-       /*
-        * If the target doesn't support merge method and some of the devices
-        * provided their merge_bvec method (we know this by looking for the
-        * max_hw_sectors that dm_set_device_limits may set), then we can't
-        * allow bios with multiple vector entries.  So always set max_size
-        * to 0, and the code below allows just one page.
-        */
-       else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
-               max_size = 0;
-
-out:
-       dm_put_live_table_fast(md);
-       /*
-        * Always allow an entire first page
-        */
-       if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
-               max_size = biovec->bv_len;
-
-       return max_size;
-}
-
 /*
  * The request function that just remaps the bio built up by
  * dm_merge_bvec.
@@ -2507,59 +2445,6 @@ static void __set_size(struct mapped_device *md, sector_t size)
        i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
 }
 
-/*
- * Return 1 if the queue has a compulsory merge_bvec_fn function.
- *
- * If this function returns 0, then the device is either a non-dm
- * device without a merge_bvec_fn, or it is a dm device that is
- * able to split any bios it receives that are too big.
- */
-int dm_queue_merge_is_compulsory(struct request_queue *q)
-{
-       struct mapped_device *dev_md;
-
-       if (!q->merge_bvec_fn)
-               return 0;
-
-       if (q->make_request_fn == dm_make_request) {
-               dev_md = q->queuedata;
-               if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
-                       return 0;
-       }
-
-       return 1;
-}
-
-static int dm_device_merge_is_compulsory(struct dm_target *ti,
-                                        struct dm_dev *dev, sector_t start,
-                                        sector_t len, void *data)
-{
-       struct block_device *bdev = dev->bdev;
-       struct request_queue *q = bdev_get_queue(bdev);
-
-       return dm_queue_merge_is_compulsory(q);
-}
-
-/*
- * Return 1 if it is acceptable to ignore merge_bvec_fn based
- * on the properties of the underlying devices.
- */
-static int dm_table_merge_is_optional(struct dm_table *table)
-{
-       unsigned i = 0;
-       struct dm_target *ti;
-
-       while (i < dm_table_get_num_targets(table)) {
-               ti = dm_table_get_target(table, i++);
-
-               if (ti->type->iterate_devices &&
-                   ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
-                       return 0;
-       }
-
-       return 1;
-}
-
 /*
  * Returns old map, which caller must destroy.
  */
@@ -2569,7 +2454,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
        struct dm_table *old_map;
        struct request_queue *q = md->queue;
        sector_t size;
-       int merge_is_optional;
 
        size = dm_table_get_size(t);
 
@@ -2595,17 +2479,11 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
        __bind_mempools(md, t);
 
-       merge_is_optional = dm_table_merge_is_optional(t);
-
        old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
        rcu_assign_pointer(md->map, t);
        md->immutable_target_type = dm_table_get_immutable_target_type(t);
 
        dm_table_set_restrictions(t, q, limits);
-       if (merge_is_optional)
-               set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
-       else
-               clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
        if (old_map)
                dm_sync_table(md);
 
@@ -2886,7 +2764,6 @@ int dm_setup_md_queue(struct mapped_device *md)
        case DM_TYPE_BIO_BASED:
                dm_init_old_md_queue(md);
                blk_queue_make_request(md->queue, dm_make_request);
-               blk_queue_merge_bvec(md->queue, dm_merge_bvec);
                break;
        }
 
index 4e984993d40aaddb43af0d4add4faba318d26fd6..7edcf97dfa5a66fc93b964c4f16671d72a17e5a8 100644 (file)
@@ -78,8 +78,6 @@ bool dm_table_mq_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
 struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
-int dm_queue_merge_is_compulsory(struct request_queue *q);
-
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
 void dm_set_md_type(struct mapped_device *md, unsigned type);
index aefd66142eef1321a951f899f72e842558d21468..b7fe7e9fc77730a4f7a1aca6e932aa1420b90967 100644 (file)
@@ -52,48 +52,6 @@ static inline struct dev_info *which_dev(struct mddev *mddev, sector_t sector)
        return conf->disks + lo;
 }
 
-/**
- *     linear_mergeable_bvec -- tell bio layer if two requests can be merged
- *     @q: request queue
- *     @bvm: properties of new bio
- *     @biovec: the request that could be merged to it.
- *
- *     Return amount of bytes we can take at this offset
- */
-static int linear_mergeable_bvec(struct mddev *mddev,
-                                struct bvec_merge_data *bvm,
-                                struct bio_vec *biovec)
-{
-       struct dev_info *dev0;
-       unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
-       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-       int maxbytes = biovec->bv_len;
-       struct request_queue *subq;
-
-       dev0 = which_dev(mddev, sector);
-       maxsectors = dev0->end_sector - sector;
-       subq = bdev_get_queue(dev0->rdev->bdev);
-       if (subq->merge_bvec_fn) {
-               bvm->bi_bdev = dev0->rdev->bdev;
-               bvm->bi_sector -= dev0->end_sector - dev0->rdev->sectors;
-               maxbytes = min(maxbytes, subq->merge_bvec_fn(subq, bvm,
-                                                            biovec));
-       }
-
-       if (maxsectors < bio_sectors)
-               maxsectors = 0;
-       else
-               maxsectors -= bio_sectors;
-
-       if (maxsectors <= (PAGE_SIZE >> 9 ) && bio_sectors == 0)
-               return maxbytes;
-
-       if (maxsectors > (maxbytes >> 9))
-               return maxbytes;
-       else
-               return maxsectors << 9;
-}
-
 static int linear_congested(struct mddev *mddev, int bits)
 {
        struct linear_conf *conf;
@@ -338,7 +296,6 @@ static struct md_personality linear_personality =
        .size           = linear_size,
        .quiesce        = linear_quiesce,
        .congested      = linear_congested,
-       .mergeable_bvec = linear_mergeable_bvec,
 };
 
 static int __init linear_init (void)
index e1d8723720ccbcd72a51239927e85342d29ab4e5..d28bf5cea2243e9b620895758758f01fbc560e23 100644 (file)
@@ -354,29 +354,6 @@ static int md_congested(void *data, int bits)
        return mddev_congested(mddev, bits);
 }
 
-static int md_mergeable_bvec(struct request_queue *q,
-                            struct bvec_merge_data *bvm,
-                            struct bio_vec *biovec)
-{
-       struct mddev *mddev = q->queuedata;
-       int ret;
-       rcu_read_lock();
-       if (mddev->suspended) {
-               /* Must always allow one vec */
-               if (bvm->bi_size == 0)
-                       ret = biovec->bv_len;
-               else
-                       ret = 0;
-       } else {
-               struct md_personality *pers = mddev->pers;
-               if (pers && pers->mergeable_bvec)
-                       ret = pers->mergeable_bvec(mddev, bvm, biovec);
-               else
-                       ret = biovec->bv_len;
-       }
-       rcu_read_unlock();
-       return ret;
-}
 /*
  * Generic flush handling for md
  */
@@ -5188,7 +5165,6 @@ int md_run(struct mddev *mddev)
        if (mddev->queue) {
                mddev->queue->backing_dev_info.congested_data = mddev;
                mddev->queue->backing_dev_info.congested_fn = md_congested;
-               blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
        }
        if (pers->sync_request) {
                if (mddev->kobj.sd &&
@@ -5317,7 +5293,6 @@ static void md_clean(struct mddev *mddev)
        mddev->degraded = 0;
        mddev->safemode = 0;
        mddev->private = NULL;
-       mddev->merge_check_needed = 0;
        mddev->bitmap_info.offset = 0;
        mddev->bitmap_info.default_offset = 0;
        mddev->bitmap_info.default_space = 0;
@@ -5514,7 +5489,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
 
                __md_stop_writes(mddev);
                __md_stop(mddev);
-               mddev->queue->merge_bvec_fn = NULL;
                mddev->queue->backing_dev_info.congested_fn = NULL;
 
                /* tell userspace to handle 'inactive' */
index 7da6e9c3cb53eb7d28ab44f6d40dbcea63e2d486..ab339571e57f746cd890edcb024969c3cc46c1c1 100644 (file)
@@ -134,10 +134,6 @@ enum flag_bits {
        Bitmap_sync,            /* ..actually, not quite In_sync.  Need a
                                 * bitmap-based recovery to get fully in sync
                                 */
-       Unmerged,               /* device is being added to array and should
-                                * be considerred for bvec_merge_fn but not
-                                * yet for actual IO
-                                */
        WriteMostly,            /* Avoid reading if at all possible */
        AutoDetected,           /* added by auto-detect */
        Blocked,                /* An error occurred but has not yet
@@ -374,10 +370,6 @@ struct mddev {
        int                             degraded;       /* whether md should consider
                                                         * adding a spare
                                                         */
-       int                             merge_check_needed; /* at least one
-                                                            * member device
-                                                            * has a
-                                                            * merge_bvec_fn */
 
        atomic_t                        recovery_active; /* blocks scheduled, but not written */
        wait_queue_head_t               recovery_wait;
@@ -532,10 +524,6 @@ struct md_personality
        /* congested implements bdi.congested_fn().
         * Will not be called while array is 'suspended' */
        int (*congested)(struct mddev *mddev, int bits);
-       /* mergeable_bvec is use to implement ->merge_bvec_fn */
-       int (*mergeable_bvec)(struct mddev *mddev,
-                             struct bvec_merge_data *bvm,
-                             struct bio_vec *biovec);
 };
 
 struct md_sysfs_entry {
index 082a489af9d38f4d60bf76bd147812713a6208b0..d222522c52e077dcdd012fbf22d929fc6a7cb0ad 100644 (file)
@@ -257,18 +257,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
                                          rdev->data_offset << 9);
 
-               /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, so limit ->max_segments to one, lying
-                * within a single page.
-                * (Note: it is very unlikely that a device with
-                * merge_bvec_fn will be involved in multipath.)
-                */
-                       if (q->merge_bvec_fn) {
-                               blk_queue_max_segments(mddev->queue, 1);
-                               blk_queue_segment_boundary(mddev->queue,
-                                                          PAGE_CACHE_SIZE - 1);
-                       }
-
                        spin_lock_irq(&conf->device_lock);
                        mddev->degraded--;
                        rdev->raid_disk = path;
@@ -432,15 +420,6 @@ static int multipath_run (struct mddev *mddev)
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
 
-               /* as we don't honour merge_bvec_fn, we must never risk
-                * violating it, not that we ever expect a device with
-                * a merge_bvec_fn to be involved in multipath */
-               if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
-                       blk_queue_max_segments(mddev->queue, 1);
-                       blk_queue_segment_boundary(mddev->queue,
-                                                  PAGE_CACHE_SIZE - 1);
-               }
-
                if (!test_bit(Faulty, &rdev->flags))
                        working_disks++;
        }
index e6e0ae56f66be9d8b6e9c3a5213fe8fbed5b85a2..59cda501a224af2eb84667ae1283870ce24bdd16 100644 (file)
@@ -192,9 +192,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                        disk_stack_limits(mddev->gendisk, rdev1->bdev,
                                          rdev1->data_offset << 9);
 
-               if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
-                       conf->has_merge_bvec = 1;
-
                if (!smallest || (rdev1->sectors < smallest->sectors))
                        smallest = rdev1;
                cnt++;
@@ -351,58 +348,6 @@ static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
                             + sector_div(sector, zone->nb_dev)];
 }
 
-/**
- *     raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- *     @mddev: the md device
- *     @bvm: properties of new bio
- *     @biovec: the request that could be merged to it.
- *
- *     Return amount of bytes we can accept at this offset
- */
-static int raid0_mergeable_bvec(struct mddev *mddev,
-                               struct bvec_merge_data *bvm,
-                               struct bio_vec *biovec)
-{
-       struct r0conf *conf = mddev->private;
-       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-       sector_t sector_offset = sector;
-       int max;
-       unsigned int chunk_sectors = mddev->chunk_sectors;
-       unsigned int bio_sectors = bvm->bi_size >> 9;
-       struct strip_zone *zone;
-       struct md_rdev *rdev;
-       struct request_queue *subq;
-
-       if (is_power_of_2(chunk_sectors))
-               max =  (chunk_sectors - ((sector & (chunk_sectors-1))
-                                               + bio_sectors)) << 9;
-       else
-               max =  (chunk_sectors - (sector_div(sector, chunk_sectors)
-                                               + bio_sectors)) << 9;
-       if (max < 0)
-               max = 0; /* bio_add cannot handle a negative return */
-       if (max <= biovec->bv_len && bio_sectors == 0)
-               return biovec->bv_len;
-       if (max < biovec->bv_len)
-               /* too small already, no need to check further */
-               return max;
-       if (!conf->has_merge_bvec)
-               return max;
-
-       /* May need to check subordinate device */
-       sector = sector_offset;
-       zone = find_zone(mddev->private, &sector_offset);
-       rdev = map_sector(mddev, zone, sector, &sector_offset);
-       subq = bdev_get_queue(rdev->bdev);
-       if (subq->merge_bvec_fn) {
-               bvm->bi_bdev = rdev->bdev;
-               bvm->bi_sector = sector_offset + zone->dev_start +
-                       rdev->data_offset;
-               return min(max, subq->merge_bvec_fn(subq, bvm, biovec));
-       } else
-               return max;
-}
-
 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
        sector_t array_sectors = 0;
@@ -727,7 +672,6 @@ static struct md_personality raid0_personality=
        .takeover       = raid0_takeover,
        .quiesce        = raid0_quiesce,
        .congested      = raid0_congested,
-       .mergeable_bvec = raid0_mergeable_bvec,
 };
 
 static int __init raid0_init (void)
index 05539d9c97f06212ad3f436585e6574557c5f962..7127a623f5da2d4e5592270ee9291b9820efea81 100644 (file)
@@ -12,8 +12,6 @@ struct r0conf {
        struct md_rdev          **devlist; /* lists of rdevs, pointed to
                                            * by strip_zone->dev */
        int                     nr_strip_zones;
-       int                     has_merge_bvec; /* at least one member has
-                                                * a merge_bvec_fn */
 };
 
 #endif
index 60d0a8626e63171e6fc26883b6244ebc33507e6a..0ff06fdb83a9b4d7bcc8036923833f0f53cad7a9 100644 (file)
@@ -557,7 +557,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
                    || rdev == NULL
-                   || test_bit(Unmerged, &rdev->flags)
                    || test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
@@ -708,38 +707,6 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
        return best_disk;
 }
 
-static int raid1_mergeable_bvec(struct mddev *mddev,
-                               struct bvec_merge_data *bvm,
-                               struct bio_vec *biovec)
-{
-       struct r1conf *conf = mddev->private;
-       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-       int max = biovec->bv_len;
-
-       if (mddev->merge_check_needed) {
-               int disk;
-               rcu_read_lock();
-               for (disk = 0; disk < conf->raid_disks * 2; disk++) {
-                       struct md_rdev *rdev = rcu_dereference(
-                               conf->mirrors[disk].rdev);
-                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                               struct request_queue *q =
-                                       bdev_get_queue(rdev->bdev);
-                               if (q->merge_bvec_fn) {
-                                       bvm->bi_sector = sector +
-                                               rdev->data_offset;
-                                       bvm->bi_bdev = rdev->bdev;
-                                       max = min(max, q->merge_bvec_fn(
-                                                         q, bvm, biovec));
-                               }
-                       }
-               }
-               rcu_read_unlock();
-       }
-       return max;
-
-}
-
 static int raid1_congested(struct mddev *mddev, int bits)
 {
        struct r1conf *conf = mddev->private;
@@ -1268,8 +1235,7 @@ read_again:
                        break;
                }
                r1_bio->bios[i] = NULL;
-               if (!rdev || test_bit(Faulty, &rdev->flags)
-                   || test_bit(Unmerged, &rdev->flags)) {
+               if (!rdev || test_bit(Faulty, &rdev->flags)) {
                        if (i < conf->raid_disks)
                                set_bit(R1BIO_Degraded, &r1_bio->state);
                        continue;
@@ -1614,7 +1580,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        struct raid1_info *p;
        int first = 0;
        int last = conf->raid_disks - 1;
-       struct request_queue *q = bdev_get_queue(rdev->bdev);
 
        if (mddev->recovery_disabled == conf->recovery_disabled)
                return -EBUSY;
@@ -1622,11 +1587,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
-       if (q->merge_bvec_fn) {
-               set_bit(Unmerged, &rdev->flags);
-               mddev->merge_check_needed = 1;
-       }
-
        for (mirror = first; mirror <= last; mirror++) {
                p = conf->mirrors+mirror;
                if (!p->rdev) {
@@ -1658,19 +1618,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
-       if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
-               /* Some requests might not have seen this new
-                * merge_bvec_fn.  We must wait for them to complete
-                * before merging the device fully.
-                * First we make sure any code which has tested
-                * our function has submitted the request, then
-                * we wait for all outstanding requests to complete.
-                */
-               synchronize_sched();
-               freeze_array(conf, 0);
-               unfreeze_array(conf);
-               clear_bit(Unmerged, &rdev->flags);
-       }
        md_integrity_add_rdev(rdev, mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -2806,8 +2753,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
                        goto abort;
                disk->rdev = rdev;
                q = bdev_get_queue(rdev->bdev);
-               if (q->merge_bvec_fn)
-                       mddev->merge_check_needed = 1;
 
                disk->head_position = 0;
                disk->seq_start = MaxSector;
@@ -3172,7 +3117,6 @@ static struct md_personality raid1_personality =
        .quiesce        = raid1_quiesce,
        .takeover       = raid1_takeover,
        .congested      = raid1_congested,
-       .mergeable_bvec = raid1_mergeable_bvec,
 };
 
 static int __init raid_init(void)
index 316ff6f611e96c2d97feac3322a223e36ce7f23a..d92098f3e65bdf1bcd1f8997b018d8e064610de3 100644 (file)
@@ -671,93 +671,6 @@ static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
        return (vchunk << geo->chunk_shift) + offset;
 }
 
-/**
- *     raid10_mergeable_bvec -- tell bio layer if a two requests can be merged
- *     @mddev: the md device
- *     @bvm: properties of new bio
- *     @biovec: the request that could be merged to it.
- *
- *     Return amount of bytes we can accept at this offset
- *     This requires checking for end-of-chunk if near_copies != raid_disks,
- *     and for subordinate merge_bvec_fns if merge_check_needed.
- */
-static int raid10_mergeable_bvec(struct mddev *mddev,
-                                struct bvec_merge_data *bvm,
-                                struct bio_vec *biovec)
-{
-       struct r10conf *conf = mddev->private;
-       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-       int max;
-       unsigned int chunk_sectors;
-       unsigned int bio_sectors = bvm->bi_size >> 9;
-       struct geom *geo = &conf->geo;
-
-       chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
-       if (conf->reshape_progress != MaxSector &&
-           ((sector >= conf->reshape_progress) !=
-            conf->mddev->reshape_backwards))
-               geo = &conf->prev;
-
-       if (geo->near_copies < geo->raid_disks) {
-               max = (chunk_sectors - ((sector & (chunk_sectors - 1))
-                                       + bio_sectors)) << 9;
-               if (max < 0)
-                       /* bio_add cannot handle a negative return */
-                       max = 0;
-               if (max <= biovec->bv_len && bio_sectors == 0)
-                       return biovec->bv_len;
-       } else
-               max = biovec->bv_len;
-
-       if (mddev->merge_check_needed) {
-               struct {
-                       struct r10bio r10_bio;
-                       struct r10dev devs[conf->copies];
-               } on_stack;
-               struct r10bio *r10_bio = &on_stack.r10_bio;
-               int s;
-               if (conf->reshape_progress != MaxSector) {
-                       /* Cannot give any guidance during reshape */
-                       if (max <= biovec->bv_len && bio_sectors == 0)
-                               return biovec->bv_len;
-                       return 0;
-               }
-               r10_bio->sector = sector;
-               raid10_find_phys(conf, r10_bio);
-               rcu_read_lock();
-               for (s = 0; s < conf->copies; s++) {
-                       int disk = r10_bio->devs[s].devnum;
-                       struct md_rdev *rdev = rcu_dereference(
-                               conf->mirrors[disk].rdev);
-                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                               struct request_queue *q =
-                                       bdev_get_queue(rdev->bdev);
-                               if (q->merge_bvec_fn) {
-                                       bvm->bi_sector = r10_bio->devs[s].addr
-                                               + rdev->data_offset;
-                                       bvm->bi_bdev = rdev->bdev;
-                                       max = min(max, q->merge_bvec_fn(
-                                                         q, bvm, biovec));
-                               }
-                       }
-                       rdev = rcu_dereference(conf->mirrors[disk].replacement);
-                       if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                               struct request_queue *q =
-                                       bdev_get_queue(rdev->bdev);
-                               if (q->merge_bvec_fn) {
-                                       bvm->bi_sector = r10_bio->devs[s].addr
-                                               + rdev->data_offset;
-                                       bvm->bi_bdev = rdev->bdev;
-                                       max = min(max, q->merge_bvec_fn(
-                                                         q, bvm, biovec));
-                               }
-                       }
-               }
-               rcu_read_unlock();
-       }
-       return max;
-}
-
 /*
  * This routine returns the disk from which the requested read should
  * be done. There is a per-array 'next expected sequential IO' sector
@@ -820,12 +733,10 @@ retry:
                disk = r10_bio->devs[slot].devnum;
                rdev = rcu_dereference(conf->mirrors[disk].replacement);
                if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
-                   test_bit(Unmerged, &rdev->flags) ||
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
                        rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (rdev == NULL ||
-                   test_bit(Faulty, &rdev->flags) ||
-                   test_bit(Unmerged, &rdev->flags))
+                   test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
                    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
@@ -1325,11 +1236,9 @@ retry_write:
                        blocked_rdev = rrdev;
                        break;
                }
-               if (rdev && (test_bit(Faulty, &rdev->flags)
-                            || test_bit(Unmerged, &rdev->flags)))
+               if (rdev && (test_bit(Faulty, &rdev->flags)))
                        rdev = NULL;
-               if (rrdev && (test_bit(Faulty, &rrdev->flags)
-                             || test_bit(Unmerged, &rrdev->flags)))
+               if (rrdev && (test_bit(Faulty, &rrdev->flags)))
                        rrdev = NULL;
 
                r10_bio->devs[i].bio = NULL;
@@ -1776,7 +1685,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        int mirror;
        int first = 0;
        int last = conf->geo.raid_disks - 1;
-       struct request_queue *q = bdev_get_queue(rdev->bdev);
 
        if (mddev->recovery_cp < MaxSector)
                /* only hot-add to in-sync arrays, as recovery is
@@ -1789,11 +1697,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
-       if (q->merge_bvec_fn) {
-               set_bit(Unmerged, &rdev->flags);
-               mddev->merge_check_needed = 1;
-       }
-
        if (rdev->saved_raid_disk >= first &&
            conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
                mirror = rdev->saved_raid_disk;
@@ -1832,19 +1735,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                rcu_assign_pointer(p->rdev, rdev);
                break;
        }
-       if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
-               /* Some requests might not have seen this new
-                * merge_bvec_fn.  We must wait for them to complete
-                * before merging the device fully.
-                * First we make sure any code which has tested
-                * our function has submitted the request, then
-                * we wait for all outstanding requests to complete.
-                */
-               synchronize_sched();
-               freeze_array(conf, 0);
-               unfreeze_array(conf);
-               clear_bit(Unmerged, &rdev->flags);
-       }
        md_integrity_add_rdev(rdev, mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
@@ -2392,7 +2282,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (rdev &&
-                           !test_bit(Unmerged, &rdev->flags) &&
                            test_bit(In_sync, &rdev->flags) &&
                            is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
                                        &first_bad, &bad_sectors) == 0) {
@@ -2446,7 +2335,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                        d = r10_bio->devs[sl].devnum;
                        rdev = rcu_dereference(conf->mirrors[d].rdev);
                        if (!rdev ||
-                           test_bit(Unmerged, &rdev->flags) ||
                            !test_bit(In_sync, &rdev->flags))
                                continue;
 
@@ -3638,8 +3526,6 @@ static int run(struct mddev *mddev)
                        disk->rdev = rdev;
                }
                q = bdev_get_queue(rdev->bdev);
-               if (q->merge_bvec_fn)
-                       mddev->merge_check_needed = 1;
                diff = (rdev->new_data_offset - rdev->data_offset);
                if (!mddev->reshape_backwards)
                        diff = -diff;
@@ -4692,7 +4578,6 @@ static struct md_personality raid10_personality =
        .start_reshape  = raid10_start_reshape,
        .finish_reshape = raid10_finish_reshape,
        .congested      = raid10_congested,
-       .mergeable_bvec = raid10_mergeable_bvec,
 };
 
 static int __init raid_init(void)
index 7823295332de0f4a7ffb5a97dbca1fb47eae436c..6d20692952d247893092a8bc8f51cee8be1783bb 100644 (file)
@@ -4663,35 +4663,6 @@ static int raid5_congested(struct mddev *mddev, int bits)
        return 0;
 }
 
-/* We want read requests to align with chunks where possible,
- * but write requests don't need to.
- */
-static int raid5_mergeable_bvec(struct mddev *mddev,
-                               struct bvec_merge_data *bvm,
-                               struct bio_vec *biovec)
-{
-       sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
-       int max;
-       unsigned int chunk_sectors = mddev->chunk_sectors;
-       unsigned int bio_sectors = bvm->bi_size >> 9;
-
-       /*
-        * always allow writes to be mergeable, read as well if array
-        * is degraded as we'll go through stripe cache anyway.
-        */
-       if ((bvm->bi_rw & 1) == WRITE || mddev->degraded)
-               return biovec->bv_len;
-
-       if (mddev->new_chunk_sectors < mddev->chunk_sectors)
-               chunk_sectors = mddev->new_chunk_sectors;
-       max =  (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
-       if (max < 0) max = 0;
-       if (max <= biovec->bv_len && bio_sectors == 0)
-               return biovec->bv_len;
-       else
-               return max;
-}
-
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
        sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
@@ -7764,7 +7735,6 @@ static struct md_personality raid6_personality =
        .quiesce        = raid5_quiesce,
        .takeover       = raid6_takeover,
        .congested      = raid5_congested,
-       .mergeable_bvec = raid5_mergeable_bvec,
 };
 static struct md_personality raid5_personality =
 {
@@ -7788,7 +7758,6 @@ static struct md_personality raid5_personality =
        .quiesce        = raid5_quiesce,
        .takeover       = raid5_takeover,
        .congested      = raid5_congested,
-       .mergeable_bvec = raid5_mergeable_bvec,
 };
 
 static struct md_personality raid4_personality =
@@ -7813,7 +7782,6 @@ static struct md_personality raid4_personality =
        .quiesce        = raid5_quiesce,
        .takeover       = raid4_takeover,
        .congested      = raid5_congested,
-       .mergeable_bvec = raid5_mergeable_bvec,
 };
 
 static int __init raid5_init(void)
index ca778d9c7d81222f75024c93a8452d10efc49285..a1feff54aeab0b7fe9155fcc0b1a1fee3d9ac5e0 100644 (file)
@@ -213,14 +213,6 @@ typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
 
 struct bio_vec;
-struct bvec_merge_data {
-       struct block_device *bi_bdev;
-       sector_t bi_sector;
-       unsigned bi_size;
-       unsigned long bi_rw;
-};
-typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
-                            struct bio_vec *);
 typedef void (softirq_done_fn)(struct request *);
 typedef int (dma_drain_needed_fn)(struct request *);
 typedef int (lld_busy_fn) (struct request_queue *q);
@@ -306,7 +298,6 @@ struct request_queue {
        make_request_fn         *make_request_fn;
        prep_rq_fn              *prep_rq_fn;
        unprep_rq_fn            *unprep_rq_fn;
-       merge_bvec_fn           *merge_bvec_fn;
        softirq_done_fn         *softirq_done_fn;
        rq_timed_out_fn         *rq_timed_out_fn;
        dma_drain_needed_fn     *dma_drain_needed;
@@ -992,7 +983,6 @@ extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
-extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(struct request_queue *, int);
 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
index 51cc1deb7af3a38597a665300ef9405d8dbe3cef..76d23fa8c7d3a21ba294e62be223f5a6992132a8 100644 (file)
@@ -82,9 +82,6 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
 typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);
 
-typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
-                           struct bio_vec *biovec, int max_size);
-
 /*
  * These iteration functions are typically used to check (and combine)
  * properties of underlying devices.
@@ -160,7 +157,6 @@ struct target_type {
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
-       dm_merge_fn merge;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;