Merge branch 'md-next' into md-linus
author Shaohua Li <shli@fb.com>
Mon, 1 May 2017 21:09:21 +0000 (14:09 -0700)
committer Shaohua Li <shli@fb.com>
Mon, 1 May 2017 21:09:21 +0000 (14:09 -0700)
block/bio.c
drivers/md/linear.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
include/linux/bio.h

diff --cc block/bio.c
Simple merge
diff --cc drivers/md/linear.c
index 377a8a3672e3da107119cadaf6f8add62eca6f5b,f16316fbf6584195d62f52ca294c07d9833f944e..df6f2c98eca74f905d2482ef66a7ac1965e82b42
@@@ -257,46 -257,40 +257,41 @@@ static void linear_make_request(struct 
                return;
        }
  
-       do {
-               sector_t bio_sector = bio->bi_iter.bi_sector;
-               tmp_dev = which_dev(mddev, bio_sector);
-               start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
-               end_sector = tmp_dev->end_sector;
-               data_offset = tmp_dev->rdev->data_offset;
-               bio->bi_bdev = tmp_dev->rdev->bdev;
-               if (unlikely(bio_sector >= end_sector ||
-                            bio_sector < start_sector))
-                       goto out_of_bounds;
-               if (unlikely(bio_end_sector(bio) > end_sector)) {
-                       /* This bio crosses a device boundary, so we have to
-                        * split it.
-                        */
-                       split = bio_split(bio, end_sector - bio_sector,
-                                         GFP_NOIO, fs_bio_set);
-                       bio_chain(split, bio);
-               } else {
-                       split = bio;
-               }
+       tmp_dev = which_dev(mddev, bio_sector);
+       start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
+       end_sector = tmp_dev->end_sector;
+       data_offset = tmp_dev->rdev->data_offset;
+
+       if (unlikely(bio_sector >= end_sector ||
+                    bio_sector < start_sector))
+               goto out_of_bounds;
+
+       if (unlikely(bio_end_sector(bio) > end_sector)) {
+               /* This bio crosses a device boundary, so we have to split it */
+               struct bio *split = bio_split(bio, end_sector - bio_sector,
+                                             GFP_NOIO, mddev->bio_set);
+               bio_chain(split, bio);
+               generic_make_request(bio);
+               bio = split;
+       }
  
-               split->bi_iter.bi_sector = split->bi_iter.bi_sector -
-                       start_sector + data_offset;
-               if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
-                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
-                       /* Just ignore it */
-                       bio_endio(split);
-               } else {
-                       if (mddev->gendisk)
-                               trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
-                                                     split, disk_devt(mddev->gendisk),
-                                                     bio_sector);
-                       mddev_check_writesame(mddev, split);
-                       mddev_check_write_zeroes(mddev, split);
-                       generic_make_request(split);
-               }
-       } while (split != bio);
+       bio->bi_bdev = tmp_dev->rdev->bdev;
+       bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
+               start_sector + data_offset;
+
+       if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+               /* Just ignore it */
+               bio_endio(bio);
+       } else {
+               if (mddev->gendisk)
+                       trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+                                             bio, disk_devt(mddev->gendisk),
+                                             bio_sector);
+               mddev_check_writesame(mddev, bio);
++              mddev_check_write_zeroes(mddev, bio);
+               generic_make_request(bio);
+       }
        return;
  
  out_of_bounds:
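
Note on the resolution above: both parents touched linear_make_request(), and the merge keeps md-next's rewrite, which splits at most once and resubmits the remainder instead of looping over splits, while re-applying the mddev_check_write_zeroes() call (the '++' line) to the rewritten flow. A minimal sketch of that split/chain/resubmit pattern, assuming a hypothetical driver with its own bio_set (my_bio_set) and a per-device length limit (max_sectors) — illustrative, not the md code itself:

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio_set *my_bio_set;      /* illustrative; made with bioset_create() */

static void my_make_request(struct bio *bio, unsigned max_sectors)
{
        if (bio_sectors(bio) > max_sectors) {
                /* Carve off the prefix this device can take now... */
                struct bio *split = bio_split(bio, max_sectors,
                                              GFP_NOIO, my_bio_set);

                /* ...chain it so the parent completes only after both halves... */
                bio_chain(split, bio);

                /* ...and hand the remainder straight back to the block layer. */
                generic_make_request(bio);
                bio = split;
        }

        /* bio now fits within one device: remap and submit it. */
        generic_make_request(bio);
}

Resubmitting through generic_make_request() relies on the block layer turning recursion into iteration via its per-task bio list, which is why the rewrite can drop the old do/while loop.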
diff --cc drivers/md/md.h
index 1e76d64ce1803be229477fd83887514980390dee,0418b29945e7a88ab414c96ff2f623a9f891bcaa..4e75d121bfcc5671640421769476c85d2843e7c9
@@@ -710,10 -719,58 +719,64 @@@ static inline void mddev_check_writesam
                mddev->queue->limits.max_write_same_sectors = 0;
  }
  
 -
 +static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 +{
 +      if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
 +          !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
 +              mddev->queue->limits.max_write_zeroes_sectors = 0;
 +}
++
+ /* Maximum size of each resync request */
+ #define RESYNC_BLOCK_SIZE (64*1024)
+ #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+ /* for managing resync I/O pages */
+ struct resync_pages {
+       unsigned        idx;    /* for get/put page from the pool */
+       void            *raid_bio;
+       struct page     *pages[RESYNC_PAGES];
+ };
+
+ static inline int resync_alloc_pages(struct resync_pages *rp,
+                                    gfp_t gfp_flags)
+ {
+       int i;
+
+       for (i = 0; i < RESYNC_PAGES; i++) {
+               rp->pages[i] = alloc_page(gfp_flags);
+               if (!rp->pages[i])
+                       goto out_free;
+       }
+
+       return 0;
+
+ out_free:
+       while (--i >= 0)
+               put_page(rp->pages[i]);
+       return -ENOMEM;
+ }
+
+ static inline void resync_free_pages(struct resync_pages *rp)
+ {
+       int i;
+
+       for (i = 0; i < RESYNC_PAGES; i++)
+               put_page(rp->pages[i]);
+ }
+
+ static inline void resync_get_all_pages(struct resync_pages *rp)
+ {
+       int i;
+
+       for (i = 0; i < RESYNC_PAGES; i++)
+               get_page(rp->pages[i]);
+ }
+
+ static inline struct page *resync_fetch_page(struct resync_pages *rp,
+                                            unsigned idx)
+ {
+       if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+               return NULL;
+       return rp->pages[idx];
+ }
  #endif /* _MD_MD_H */
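
The md.h hunk brings in two things from md-next: the mddev_check_write_zeroes() run-time opt-out, mirroring mddev_check_writesame() just above it, and the resync_pages helpers that let raid1/raid10 share one page pool per resync request instead of open-coding the allocation. A rough sketch of how the helpers compose; the function name and the bio wiring are illustrative, not the raid1/raid10 code:

/* Hypothetical caller, for illustration only. */
static int setup_resync_request(struct resync_pages *rp, gfp_t gfp)
{
        unsigned i;

        if (resync_alloc_pages(rp, gfp))       /* grab all RESYNC_PAGES pages */
                return -ENOMEM;

        rp->idx = 0;
        for (i = 0; i < RESYNC_PAGES; i++) {
                struct page *page = resync_fetch_page(rp, i); /* bounds-checked */

                /* ...attach page to the resync bio(s) here... */
                (void)page;
        }

        return 0;       /* resync_free_pages(rp) drops the references later */
}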
diff --cc drivers/md/raid0.c
index ce7a6a56cf7385284bb702ab888e4f3ce2cf4821,e777e48f55f61a8f6d7944301926776115eea0c5..84e58596594db79f0dde1f789de9a200fb1f0e8c
@@@ -469,46 -472,43 +473,44 @@@ static void raid0_make_request(struct m
                return;
        }
  
-       do {
-               sector_t bio_sector = bio->bi_iter.bi_sector;
-               sector_t sector = bio_sector;
-               unsigned chunk_sects = mddev->chunk_sectors;
+       bio_sector = bio->bi_iter.bi_sector;
+       sector = bio_sector;
+       chunk_sects = mddev->chunk_sectors;
  
-               unsigned sectors = chunk_sects -
-                       (likely(is_power_of_2(chunk_sects))
-                        ? (sector & (chunk_sects-1))
-                        : sector_div(sector, chunk_sects));
+       sectors = chunk_sects -
+               (likely(is_power_of_2(chunk_sects))
+                ? (sector & (chunk_sects-1))
+                : sector_div(sector, chunk_sects));
  
-               /* Restore due to sector_div */
-               sector = bio_sector;
+       /* Restore due to sector_div */
+       sector = bio_sector;
  
-               if (sectors < bio_sectors(bio)) {
-                       split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
-                       bio_chain(split, bio);
-               } else {
-                       split = bio;
-               }
+       if (sectors < bio_sectors(bio)) {
+               struct bio *split = bio_split(bio, sectors, GFP_NOIO, mddev->bio_set);
+               bio_chain(split, bio);
+               generic_make_request(bio);
+               bio = split;
+       }
  
-               zone = find_zone(mddev->private, &sector);
-               tmp_dev = map_sector(mddev, zone, sector, &sector);
-               split->bi_bdev = tmp_dev->bdev;
-               split->bi_iter.bi_sector = sector + zone->dev_start +
-                       tmp_dev->data_offset;
-               if (unlikely((bio_op(split) == REQ_OP_DISCARD) &&
-                        !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
-                       /* Just ignore it */
-                       bio_endio(split);
-               } else {
-                       if (mddev->gendisk)
-                               trace_block_bio_remap(bdev_get_queue(split->bi_bdev),
-                                                     split, disk_devt(mddev->gendisk),
-                                                     bio_sector);
-                       mddev_check_writesame(mddev, split);
-                       mddev_check_write_zeroes(mddev, split);
-                       generic_make_request(split);
-               }
-       } while (split != bio);
+       zone = find_zone(mddev->private, &sector);
+       tmp_dev = map_sector(mddev, zone, sector, &sector);
+       bio->bi_bdev = tmp_dev->bdev;
+       bio->bi_iter.bi_sector = sector + zone->dev_start +
+               tmp_dev->data_offset;
+
+       if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
+                    !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
+               /* Just ignore it */
+               bio_endio(bio);
+       } else {
+               if (mddev->gendisk)
+                       trace_block_bio_remap(bdev_get_queue(bio->bi_bdev),
+                                             bio, disk_devt(mddev->gendisk),
+                                             bio_sector);
+               mddev_check_writesame(mddev, bio);
++              mddev_check_write_zeroes(mddev, bio);
+               generic_make_request(bio);
+       }
  }
  
  static void raid0_status(struct seq_file *seq, struct mddev *mddev)
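
The raid0 resolution keeps the same chunk-boundary arithmetic as before, just hoisted out of the old do/while loop: for a power-of-2 chunk size the room left in the current chunk is chunk_sects - (sector & (chunk_sects - 1)); otherwise sector_div() yields the remainder and divides sector in place, hence the "Restore due to sector_div" line. A standalone worked example of the power-of-2 case (plain userspace C, values picked for illustration):

#include <stdio.h>

int main(void)
{
        unsigned long long sector = 300;        /* bio start, in 512-byte sectors */
        unsigned chunk_sects = 128;             /* 64KiB chunks */
        unsigned bio_len = 100;                 /* bio length, in sectors */

        /* 300 & 127 = 44 sectors already used in this chunk, so 84 remain. */
        unsigned sectors = chunk_sects - (sector & (chunk_sects - 1));

        if (sectors < bio_len)
                printf("split after %u sectors, resubmit the remaining %u\n",
                       sectors, bio_len - sectors);
        return 0;
}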
diff --cc drivers/md/raid1.c
Simple merge
diff --cc drivers/md/raid10.c
Simple merge
diff --cc drivers/md/raid5.c
index 2efdb0d6746074a0416f18b0c0dfbab7cc5be6a8,3d971e5a1b0e9fed8d486b549a88b0191f10a061..2e38cfac5b1dc5a318f66b4bb6e2e2195797ad53
@@@ -7253,17 -7374,13 +7371,9 @@@ static int raid5_run(struct mddev *mdde
                        stripe = (stripe | (stripe-1)) + 1;
                mddev->queue->limits.discard_alignment = stripe;
                mddev->queue->limits.discard_granularity = stripe;
 -              /*
 -               * unaligned part of discard request will be ignored, so can't
 -               * guarantee discard_zeroes_data
 -               */
 -              mddev->queue->limits.discard_zeroes_data = 0;
  
-               /*
-                * We use 16-bit counter of active stripes in bi_phys_segments
-                * (minus one for over-loaded initialization)
-                */
-               blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
-               blk_queue_max_discard_sectors(mddev->queue,
-                                             0xfffe * STRIPE_SECTORS);
                blk_queue_max_write_same_sectors(mddev->queue, 0);
 +              blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
  
                rdev_for_each(rdev, mddev) {
                        disk_stack_limits(mddev->gendisk, rdev->bdev,
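
The raid5 hunk drops the old discard_zeroes_data assignment and the bi_phys_segments-based request-size caps, and pairs the existing WRITE SAME opt-out with a matching WRITE ZEROES one: raid5 has to see the data it writes to keep parity coherent, so it zeroes both queue limits up front (limit stacking takes the minimum, so the member devices stacked below cannot re-enable them). A minimal sketch of that opt-out, with a hypothetical helper name:

#include <linux/blkdev.h>

/* Illustrative only: zero a stacked limit to veto the offload. */
static void my_disable_zeroing_offloads(struct request_queue *q)
{
        blk_queue_max_write_same_sectors(q, 0);         /* no WRITE SAME */
        blk_queue_max_write_zeroes_sectors(q, 0);       /* no WRITE ZEROES */
}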
diff --cc include/linux/bio.h
Simple merge