*
* Return the number of bytes we can take at this offset
*/
-static int linear_mergeable_bvec(struct request_queue *q,
+static int linear_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct dev_info *dev0;
unsigned long maxsectors, bio_sectors = bvm->bi_size >> 9;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
mddev->private = conf;
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
- blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
-
ret = md_integrity_register(mddev);
if (ret) {
kfree(conf);
.hot_add_disk = linear_add,
.size = linear_size,
.congested = linear_congested,
+ .mergeable_bvec = linear_mergeable_bvec,
};
static int __init linear_init (void)
return mddev_congested(mddev, bits);
}
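+/*
+ * Generic ->merge_bvec_fn for the md queue: while the array is suspended
+ * only a single vec per bio is allowed; otherwise the decision is handed
+ * to the personality's ->mergeable_bvec, if it provides one.
+ */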
+static int md_mergeable_bvec(struct request_queue *q,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec)
+{
+ struct mddev *mddev = q->queuedata;
+ int ret;
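+ /* The block layer calls in here with no md locks held, so take the
+ * rcu read lock while we look at ->pers.
+ */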
+ rcu_read_lock();
+ if (mddev->suspended) {
+ /* Must always allow one vec */
+ if (bvm->bi_size == 0)
+ ret = biovec->bv_len;
+ else
+ ret = 0;
+ } else {
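+ /* Ask the personality how much of this vec can be merged;
+ * personalities without a ->mergeable_bvec accept the whole vec.
+ */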
+ struct md_personality *pers = mddev->pers;
+ if (pers && pers->mergeable_bvec)
+ ret = pers->mergeable_bvec(mddev, bvm, biovec);
+ else
+ ret = biovec->bv_len;
+ }
+ rcu_read_unlock();
+ return ret;
+}
/*
* Generic flush handling for md
*/
if (mddev->queue) {
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = md_congested;
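+ /* All merge decisions for this queue now go through md, which
+ * consults the personality's ->mergeable_bvec where present.
+ */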
+ blk_queue_merge_bvec(mddev->queue, md_mergeable_bvec);
}
if (mddev->pers->sync_request) {
if (mddev->kobj.sd &&
/* congested implements bdi.congested_fn().
* Will not be called while array is 'suspended' */
int (*congested)(struct mddev *mddev, int bits);
+ /* mergeable_bvec is used to implement ->merge_bvec_fn */
+ int (*mergeable_bvec)(struct mddev *mddev,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec);
};
struct md_sysfs_entry {
/**
* raid0_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
+ * @mddev: the md device
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* Return the number of bytes we can accept at this offset
*/
-static int raid0_mergeable_bvec(struct request_queue *q,
+static int raid0_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct r0conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
sector_t sector_offset = sector;
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
}
- blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
dump_zones(mddev);
ret = md_integrity_register(mddev);
.takeover = raid0_takeover,
.quiesce = raid0_quiesce,
.congested = raid0_congested,
+ .mergeable_bvec = raid0_mergeable_bvec,
};
static int __init raid0_init (void)
return best_disk;
}
-static int raid1_mergeable_bvec(struct request_queue *q,
+static int raid1_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct r1conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max = biovec->bv_len;
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
if (mddev->queue) {
- blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
-
if (discard_supported)
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
mddev->queue);
.quiesce = raid1_quiesce,
.takeover = raid1_takeover,
.congested = raid1_congested,
+ .mergeable_bvec = raid1_mergeable_bvec,
};
static int __init raid_init(void)
/**
* raid10_mergeable_bvec -- tell bio layer if two requests can be merged
- * @q: request queue
+ * @mddev: the md device
* @bvm: properties of new bio
* @biovec: the request that could be merged to it.
*
* This requires checking for end-of-chunk if near_copies != raid_disks,
* and for subordinate merge_bvec_fns if merge_check_needed.
*/
-static int raid10_mergeable_bvec(struct request_queue *q,
+static int raid10_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
struct r10conf *conf = mddev->private;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
stripe /= conf->geo.near_copies;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
}
if (md_integrity_register(mddev))
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
.congested = raid10_congested,
+ .mergeable_bvec = raid10_mergeable_bvec,
};
static int __init raid_init(void)
/* We want read requests to align with chunks where possible,
* but write requests don't need to.
*/
-static int raid5_mergeable_bvec(struct request_queue *q,
+static int raid5_mergeable_bvec(struct mddev *mddev,
struct bvec_merge_data *bvm,
struct bio_vec *biovec)
{
- struct mddev *mddev = q->queuedata;
sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
int max;
unsigned int chunk_sectors = mddev->chunk_sectors;
if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
- blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
-
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
blk_queue_io_opt(mddev->queue, chunk_size *
.quiesce = raid5_quiesce,
.takeover = raid6_takeover,
.congested = raid5_congested,
+ .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid5_personality =
{
.quiesce = raid5_quiesce,
.takeover = raid5_takeover,
.congested = raid5_congested,
+ .mergeable_bvec = raid5_mergeable_bvec,
};
static struct md_personality raid4_personality =
.quiesce = raid5_quiesce,
.takeover = raid4_takeover,
.congested = raid5_congested,
+ .mergeable_bvec = raid5_mergeable_bvec,
};
static int __init raid5_init(void)