From: Shaohua Li
Date: Mon, 1 May 2017 21:09:21 +0000 (-0700)
Subject: Merge branch 'md-next' into md-linus
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=e265eb3a30543a237b2ebc4e0422ac82e55b07e4;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

Merge branch 'md-next' into md-linus
---

e265eb3a30543a237b2ebc4e0422ac82e55b07e4
diff --cc drivers/md/linear.c
index 377a8a3672e3,f16316fbf658..df6f2c98eca7
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@@ -257,46 -257,40 +257,41 @@@ static void linear_make_request(struct
  		return;
  	}
  
- 	do {
- 		sector_t bio_sector = bio->bi_iter.bi_sector;
- 		tmp_dev = which_dev(mddev, bio_sector);
- 		start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
- 		end_sector = tmp_dev->end_sector;
- 		data_offset = tmp_dev->rdev->data_offset;
- 		bio->bi_bdev = tmp_dev->rdev->bdev;
- 
- 		if (unlikely(bio_sector >= end_sector ||
- 			     bio_sector < start_sector))
- 			goto out_of_bounds;
- 
- 		if (unlikely(bio_end_sector(bio) > end_sector)) {
- 			/* This bio crosses a device boundary, so we have to
- 			 * split it.
- 			 */
- 			split = bio_split(bio, end_sector - bio_sector,
- 					  GFP_NOIO, fs_bio_set);
- 			bio_chain(split, bio);
- 		} else {
- 			split = bio;
- 		}
+ 	tmp_dev = which_dev(mddev, bio_sector);
+ 	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+ 	end_sector = tmp_dev->end_sector;
+ 	data_offset = tmp_dev->rdev->data_offset;
+ 
+ 	if (unlikely(bio_sector >= end_sector ||
+ 		     bio_sector < start_sector))
+ 		goto out_of_bounds;
+ 
+ 	if (unlikely(bio_end_sector(bio) > end_sector)) {
+ 		/* This bio crosses a device boundary, so we have to split it */
+ 		struct bio *split = bio_split(bio, end_sector - bio_sector,
+ 					      GFP_NOIO, mddev->bio_set);
+ 		bio_chain(split, bio);
+ 		generic_make_request(bio);
+ 		bio = split;
+ 	}
  
- 		split->bi_iter.bi_sector = split->bi_iter.bi_sector -
- 			start_sector + data_offset;
- 
- 		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
- 			     !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
- 			/* Just ignore it */
- 			bio_endio(split);
- 		} else {
- 			if (mddev->gendisk)
- 				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
- 						      split, disk_devt(mddev->gendisk),
- 						      bio_sector);
- 			mddev_check_writesame(mddev, split);
- 			mddev_check_write_zeroes(mddev, split);
- 			generic_make_request(split);
- 		}
- 	} while (split != bio);
+ 	bio->bi_bdev = tmp_dev->rdev->bdev;
+ 	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
+ 		start_sector + data_offset;
+ 
+ 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ 		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+ 		/* Just ignore it */
+ 		bio_endio(bio);
+ 	} else {
+ 		if (mddev->gendisk)
+ 			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ 					      bio, disk_devt(mddev->gendisk),
+ 					      bio_sector);
+ 		mddev_check_writesame(mddev, bio);
++		mddev_check_write_zeroes(mddev, bio);
+ 		generic_make_request(bio);
+ 	}
  
  	return;
  
  out_of_bounds:
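Both linear_make_request() above and raid0_make_request() in the raid0.c hunk below adopt the same non-recursive splitting pattern: carve off the prefix that fits the current device, chain the remainder to it, and hand the remainder back to generic_make_request() instead of iterating in a do/while loop. A minimal sketch of that pattern, assuming hypothetical max_sectors_this_dev() and remap_and_submit() helpers in place of the per-personality mapping logic:

	static void make_request_sketch(struct mddev *mddev, struct bio *bio)
	{
		/* How much of this bio the current device can take (hypothetical). */
		sector_t max = max_sectors_this_dev(mddev, bio);

		if (bio_sectors(bio) > max) {
			/* Carve off the prefix that fits. */
			struct bio *split = bio_split(bio, max, GFP_NOIO,
						      mddev->bio_set);

			/* The remainder now completes only after the split does. */
			bio_chain(split, bio);

			/*
			 * Resubmit the remainder; generic_make_request() queues
			 * it on the current task instead of recursing, so stack
			 * depth stays bounded no matter how often we split.
			 */
			generic_make_request(bio);
			bio = split;
		}

		remap_and_submit(mddev, bio);	/* hypothetical remap + submit */
	}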
diff --cc drivers/md/md.h
index 1e76d64ce180,0418b29945e7..4e75d121bfcc
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@@ -710,10 -719,58 +719,64 @@@ static inline void mddev_check_writesam
  		mddev->queue->limits.max_write_same_sectors = 0;
  }
  
 +static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 +{
 +	if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
 +	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
 +		mddev->queue->limits.max_write_zeroes_sectors = 0;
 +}
++
+ /* Maximum size of each resync request */
+ #define RESYNC_BLOCK_SIZE (64*1024)
+ #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+ 
+ /* for managing resync I/O pages */
+ struct resync_pages {
+ 	unsigned	idx;	/* for get/put page from the pool */
+ 	void		*raid_bio;
+ 	struct page	*pages[RESYNC_PAGES];
+ };
+ 
+ static inline int resync_alloc_pages(struct resync_pages *rp,
+ 				     gfp_t gfp_flags)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < RESYNC_PAGES; i++) {
+ 		rp->pages[i] = alloc_page(gfp_flags);
+ 		if (!rp->pages[i])
+ 			goto out_free;
+ 	}
+ 
+ 	return 0;
+ 
+ out_free:
+ 	while (--i >= 0)
+ 		put_page(rp->pages[i]);
+ 	return -ENOMEM;
+ }
+ 
+ static inline void resync_free_pages(struct resync_pages *rp)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < RESYNC_PAGES; i++)
+ 		put_page(rp->pages[i]);
+ }
+ 
+ static inline void resync_get_all_pages(struct resync_pages *rp)
+ {
+ 	int i;
+ 
+ 	for (i = 0; i < RESYNC_PAGES; i++)
+ 		get_page(rp->pages[i]);
+ }
+ 
+ static inline struct page *resync_fetch_page(struct resync_pages *rp,
+ 					     unsigned idx)
+ {
+ 	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+ 		return NULL;
+ 	return rp->pages[idx];
+ }
- 
  #endif /* _MD_MD_H */
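The resync_pages helpers above centralize page management for resync I/O: each instance owns one RESYNC_BLOCK_SIZE (64K) window of pages. A minimal usage sketch, assuming process context and a bio sized to hold RESYNC_PAGES vecs; the fill_resync_bio() name is illustrative, not from the tree:

	/*
	 * Illustrative only: rp must stay allocated until the resync
	 * I/O completes, at which point the caller runs
	 * resync_free_pages(rp) to drop the page references.
	 */
	static int fill_resync_bio(struct bio *bio, struct resync_pages *rp)
	{
		unsigned idx;
		int ret;

		/* Allocate the whole 64K window up front; all-or-nothing. */
		ret = resync_alloc_pages(rp, GFP_KERNEL);
		if (ret)
			return ret;

		/*
		 * Attach the window to the bio one page at a time; the return
		 * value is ignored because the bio was sized for RESYNC_PAGES.
		 */
		for (idx = 0; idx < RESYNC_PAGES; idx++)
			bio_add_page(bio, resync_fetch_page(rp, idx),
				     PAGE_SIZE, 0);

		return 0;
	}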
diff --cc drivers/md/raid0.c
index ce7a6a56cf73,e777e48f55f6..84e58596594d
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@@ -469,46 -472,43 +473,44 @@@ static void raid0_make_request(struct m
  		return;
  	}
  
- 	do {
- 		sector_t bio_sector = bio->bi_iter.bi_sector;
- 		sector_t sector = bio_sector;
- 		unsigned chunk_sects = mddev->chunk_sectors;
+ 	bio_sector = bio->bi_iter.bi_sector;
+ 	sector = bio_sector;
+ 	chunk_sects = mddev->chunk_sectors;
  
- 		unsigned sectors = chunk_sects -
- 			(likely(is_power_of_2(chunk_sects))
- 			 ? (sector & (chunk_sects-1))
- 			 : sector_div(sector, chunk_sects));
+ 	sectors = chunk_sects -
+ 		(likely(is_power_of_2(chunk_sects))
+ 		 ? (sector & (chunk_sects-1))
+ 		 : sector_div(sector, chunk_sects));
  
- 		/* Restore due to sector_div */
- 		sector = bio_sector;
+ 	/* Restore due to sector_div */
+ 	sector = bio_sector;
  
- 		if (sectors < bio_sectors(bio)) {
- 			split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
- 			bio_chain(split, bio);
- 		} else {
- 			split = bio;
- 		}
+ 	if (sectors < bio_sectors(bio)) {
+ 		struct bio *split = bio_split(bio, sectors, GFP_NOIO, mddev->bio_set);
+ 		bio_chain(split, bio);
+ 		generic_make_request(bio);
+ 		bio = split;
+ 	}
  
- 		zone = find_zone(mddev->private, &sector);
- 		tmp_dev = map_sector(mddev, zone, sector, &sector);
- 		split->bi_bdev = tmp_dev->bdev;
- 		split->bi_iter.bi_sector = sector + zone->dev_start +
- 			tmp_dev->data_offset;
- 
- 		if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
- 			     !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
- 			/* Just ignore it */
- 			bio_endio(split);
- 		} else {
- 			if (mddev->gendisk)
- 				trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
- 						      split, disk_devt(mddev->gendisk),
- 						      bio_sector);
- 			mddev_check_writesame(mddev, split);
- 			mddev_check_write_zeroes(mddev, split);
- 			generic_make_request(split);
- 		}
- 	} while (split != bio);
+ 	zone = find_zone(mddev->private, &sector);
+ 	tmp_dev = map_sector(mddev, zone, sector, &sector);
+ 	bio->bi_bdev = tmp_dev->bdev;
+ 	bio->bi_iter.bi_sector = sector + zone->dev_start +
+ 		tmp_dev->data_offset;
+ 
+ 	if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+ 		     !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+ 		/* Just ignore it */
+ 		bio_endio(bio);
+ 	} else {
+ 		if (mddev->gendisk)
+ 			trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+ 					      bio, disk_devt(mddev->gendisk),
+ 					      bio_sector);
+ 		mddev_check_writesame(mddev, bio);
++		mddev_check_write_zeroes(mddev, bio);
+ 		generic_make_request(bio);
+ 	}
  }
  
  static void raid0_status(struct seq_file *seq, struct mddev *mddev)
diff --cc drivers/md/raid5.c
index 2efdb0d67460,3d971e5a1b0e..2e38cfac5b1d
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@@ -7253,17 -7374,13 +7371,9 @@@ static int raid5_run(struct mddev *mdde
  		stripe = (stripe | (stripe-1)) + 1;
  		mddev->queue->limits.discard_alignment = stripe;
  		mddev->queue->limits.discard_granularity = stripe;
- 		/*
- 		 * unaligned part of discard request will be ignored, so can't
- 		 * guarantee discard_zeroes_data
- 		 */
- 		mddev->queue->limits.discard_zeroes_data = 0;
- 		/*
- 		 * We use 16-bit counter of active stripes in bi_phys_segments
- 		 * (minus one for over-loaded initialization)
- 		 */
- 		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
- 		blk_queue_max_discard_sectors(mddev->queue,
- 					      0xfffe * STRIPE_SECTORS);
- 		blk_queue_max_write_same_sectors(mddev->queue, 0);
+ 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
  
  		rdev_for_each(rdev, mddev) {
  			disk_stack_limits(mddev->gendisk, rdev->bdev,
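In the raid0_make_request() hunk above, sectors is the room left in the bio's current chunk: for power-of-two chunk sizes the modulo collapses to a mask, otherwise sector_div() is used (which is why sector has to be restored afterwards). A standalone worked example with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long sector = 10000;	/* bio start, in 512-byte sectors */
		unsigned chunk_sects = 1024;		/* 512 KiB chunk, power of two */

		/*
		 * Offset into the current chunk is sector % chunk_sects;
		 * the mask form below is the power-of-two fast path.
		 */
		unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

		/*
		 * 10000 % 1024 = 784, so 1024 - 784 = 240 sectors remain;
		 * any bio longer than that starting here is split at 240.
		 */
		printf("%u sectors left in this chunk\n", sectors);
		return 0;
	}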