block: Use accessor functions for queue limits
author Martin K. Petersen <martin.petersen@oracle.com>
Fri, 22 May 2009 21:17:50 +0000 (17:17 -0400)
committer Jens Axboe <jens.axboe@oracle.com>
Fri, 22 May 2009 21:22:54 +0000 (23:22 +0200)
Convert all external users of queue limits to use wrapper functions
instead of poking the request queue variables directly.
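
The conversion is mechanical. For illustration, one before/after pair
(lifted from the blk-map.c hunk below; it is not new code):

	-       if (len > (q->max_hw_sectors << 9))
	+       if (len > (queue_max_hw_sectors(q) << 9))
	                return -EINVAL;

The accessors are added as static inlines in include/linux/blkdev.h and
for now simply return the corresponding request_queue member; the
indirection means the representation of the limits can change later
without having to touch every caller again.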

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
25 files changed:
block/blk-barrier.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/blk-settings.c
block/blk-sysfs.c
block/compat_ioctl.c
block/ioctl.c
block/scsi_ioctl.c
drivers/block/pktcdvd.c
drivers/cdrom/cdrom.c
drivers/md/dm-table.c
drivers/md/linear.c
drivers/md/multipath.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/scsi/sg.c
drivers/scsi/st.c
drivers/usb/storage/scsiglue.c
fs/bio.c
include/linux/bio.h
include/linux/blkdev.h
mm/bounce.c

index 0d98054cdbd77c19e4bc35397aef702c8edc3e22..30022b4e2f6306b36d6c878efc0ea4abb28e5082 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -388,10 +388,10 @@ int blkdev_issue_discard(struct block_device *bdev,
 
                bio->bi_sector = sector;
 
-               if (nr_sects > q->max_hw_sectors) {
-                       bio->bi_size = q->max_hw_sectors << 9;
-                       nr_sects -= q->max_hw_sectors;
-                       sector += q->max_hw_sectors;
+               if (nr_sects > queue_max_hw_sectors(q)) {
+                       bio->bi_size = queue_max_hw_sectors(q) << 9;
+                       nr_sects -= queue_max_hw_sectors(q);
+                       sector += queue_max_hw_sectors(q);
                } else {
                        bio->bi_size = nr_sects << 9;
                        nr_sects = 0;
index 59c4af5231121c4c819a37b96abcc2fadbd7a7c2..7a4c40184a64cac474ee7ecc72f529769f1f0189 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1437,11 +1437,11 @@ static inline void __generic_make_request(struct bio *bio)
                        goto end_io;
                }
 
-               if (unlikely(nr_sectors > q->max_hw_sectors)) {
+               if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
                        printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-                               bdevname(bio->bi_bdev, b),
-                               bio_sectors(bio),
-                               q->max_hw_sectors);
+                              bdevname(bio->bi_bdev, b),
+                              bio_sectors(bio),
+                              queue_max_hw_sectors(q));
                        goto end_io;
                }
 
@@ -1608,8 +1608,8 @@ EXPORT_SYMBOL(submit_bio);
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-       if (blk_rq_sectors(rq) > q->max_sectors ||
-           blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+       if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+           blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
                printk(KERN_ERR "%s: over max size limit.\n", __func__);
                return -EIO;
        }
@@ -1621,8 +1621,8 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
         * limitation.
         */
        blk_recalc_rq_segments(rq);
-       if (rq->nr_phys_segments > q->max_phys_segments ||
-           rq->nr_phys_segments > q->max_hw_segments) {
+       if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+           rq->nr_phys_segments > queue_max_hw_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit.\n", __func__);
                return -EIO;
        }
index ef2492adca7e3af16537ac854280b85821426a5e..9083cf0180cc8a296d2529a4ef9d9943ea5d05c1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -115,7 +115,7 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
        struct bio *bio = NULL;
        int ret;
 
-       if (len > (q->max_hw_sectors << 9))
+       if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len)
                return -EINVAL;
@@ -292,7 +292,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        struct bio *bio;
        int ret;
 
-       if (len > (q->max_hw_sectors << 9))
+       if (len > (queue_max_hw_sectors(q) << 9))
                return -EINVAL;
        if (!len || !kbuf)
                return -EINVAL;
index 4974dd5767e516f7f2bd507fa10a55cb0da62bd5..39ce64432ba6d4cdcff6427851fe19aac172a2d7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -32,11 +32,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
-                       high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+                       high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
-                               if (seg_size + bv->bv_len > q->max_segment_size)
+                               if (seg_size + bv->bv_len
+                                   > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
@@ -91,7 +92,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                return 0;
 
        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
-           q->max_segment_size)
+           queue_max_segment_size(q))
                return 0;
 
        if (!bio_has_data(bio))
@@ -134,7 +135,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                int nbytes = bvec->bv_len;
 
                if (bvprv && cluster) {
-                       if (sg->length + nbytes > q->max_segment_size)
+                       if (sg->length + nbytes > queue_max_segment_size(q))
                                goto new_segment;
 
                        if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
@@ -205,8 +206,8 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
-           || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+           req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
@@ -227,9 +228,9 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
        unsigned short max_sectors;
 
        if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
+               max_sectors = queue_max_hw_sectors(q);
        else
-               max_sectors = q->max_sectors;
+               max_sectors = queue_max_sectors(q);
 
        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                req->cmd_flags |= REQ_NOMERGE;
@@ -251,9 +252,9 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
        unsigned short max_sectors;
 
        if (unlikely(blk_pc_request(req)))
-               max_sectors = q->max_hw_sectors;
+               max_sectors = queue_max_hw_sectors(q);
        else
-               max_sectors = q->max_sectors;
+               max_sectors = queue_max_sectors(q);
 
 
        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
@@ -287,7 +288,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        /*
         * Will it become too large?
         */
-       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+       if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
                return 0;
 
        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -299,10 +300,10 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                total_phys_segments--;
        }
 
-       if (total_phys_segments > q->max_phys_segments)
+       if (total_phys_segments > queue_max_phys_segments(q))
                return 0;
 
-       if (total_phys_segments > q->max_hw_segments)
+       if (total_phys_segments > queue_max_hw_segments(q))
                return 0;
 
        /* Merge is OK... */
index 15c3164537b8fbd75bcfb306167ca7d6dbd0a675..0b32f984eed24ffdfd391587367428be63a83253 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -219,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+       else
+               q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
@@ -395,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
 {
-       if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+       if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
-       --q->max_hw_segments;
-       --q->max_phys_segments;
+       blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+       blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
index 13d38b7e4d0fa524fcedf30f6865872205a45a7b..142a4acddd432a1afdec6bc476b804407f50df8e 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -95,7 +95,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 {
-       int max_sectors_kb = q->max_sectors >> 1;
+       int max_sectors_kb = queue_max_sectors(q) >> 1;
 
        return queue_var_show(max_sectors_kb, (page));
 }
@@ -109,7 +109,7 @@ static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
        unsigned long max_sectors_kb,
-                       max_hw_sectors_kb = q->max_hw_sectors >> 1,
+               max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
@@ -117,7 +117,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                return -EINVAL;
 
        spin_lock_irq(q->queue_lock);
-       q->max_sectors = max_sectors_kb << 1;
+       blk_queue_max_sectors(q, max_sectors_kb << 1);
        spin_unlock_irq(q->queue_lock);
 
        return ret;
@@ -125,7 +125,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
 {
-       int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+       int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
 
        return queue_var_show(max_hw_sectors_kb, (page));
 }
index 9eaa1940273a903b6fff90e1557327d92b369c57..df18a156d011b0f39bd79d70c16bf76049a2d07a 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -766,7 +766,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                return compat_put_int(arg, bdev_logical_block_size(bdev));
        case BLKSECTGET:
                return compat_put_ushort(arg,
-                                        bdev_get_queue(bdev)->max_sectors);
+                                        queue_max_sectors(bdev_get_queue(bdev)));
        case BLKRASET: /* compatible, but no compat_ptr (!) */
        case BLKFRASET:
                if (!capable(CAP_SYS_ADMIN))
index 7aa97f65da82e5a46c10c0876f9da7d16c8cbafc..500e4c73cc52ba3a36b844c9e97404508ccf4b9b 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -152,10 +152,10 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
                bio->bi_private = &wait;
                bio->bi_sector = start;
 
-               if (len > q->max_hw_sectors) {
-                       bio->bi_size = q->max_hw_sectors << 9;
-                       len -= q->max_hw_sectors;
-                       start += q->max_hw_sectors;
+               if (len > queue_max_hw_sectors(q)) {
+                       bio->bi_size = queue_max_hw_sectors(q) << 9;
+                       len -= queue_max_hw_sectors(q);
+                       start += queue_max_hw_sectors(q);
                } else {
                        bio->bi_size = len << 9;
                        len = 0;
@@ -313,7 +313,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case BLKSSZGET: /* get block device hardware sector size */
                return put_int(arg, bdev_logical_block_size(bdev));
        case BLKSECTGET:
-               return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+               return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
        case BLKRASET:
        case BLKFRASET:
                if(!capable(CAP_SYS_ADMIN))
index a9670dd4b5de034cf2130f2fa0f552eda1bd80c5..5f8e798ede4ee6d7c479bce1ee36f5488b717cc9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -75,7 +75,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
 
 static int sg_get_reserved_size(struct request_queue *q, int __user *p)
 {
-       unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+       unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
 
        return put_user(val, p);
 }
@@ -89,8 +89,8 @@ static int sg_set_reserved_size(struct request_queue *q, int __user *p)
 
        if (size < 0)
                return -EINVAL;
-       if (size > (q->max_sectors << 9))
-               size = q->max_sectors << 9;
+       if (size > (queue_max_sectors(q) << 9))
+               size = queue_max_sectors(q) << 9;
 
        q->sg_reserved_size = size;
        return 0;
@@ -264,7 +264,7 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
        if (hdr->cmd_len > BLK_MAX_CDB)
                return -EINVAL;
 
-       if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+       if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
                return -EIO;
 
        if (hdr->dxfer_len)
index 293f5858921d483e6198385d661aae93fa41efa8..d57f11759480c1c2874922e773cee62ac6571472 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -991,13 +991,15 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
  */
 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
 {
-       if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+       if ((pd->settings.size << 9) / CD_FRAMESIZE
+           <= queue_max_phys_segments(q)) {
                /*
                 * The cdrom device can handle one segment/frame
                 */
                clear_bit(PACKET_MERGE_SEGS, &pd->flags);
                return 0;
-       } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+       } else if ((pd->settings.size << 9) / PAGE_SIZE
+                  <= queue_max_phys_segments(q)) {
                /*
                 * We can handle this case at the expense of some extra memory
                 * copies during write operations
index cceace61ef286622b93af85fb5c45d134c289eae..71d1b9bab70b515afbe682cca1de5bc4ca3613cb 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2101,8 +2101,8 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                nr = nframes;
                if (cdi->cdda_method == CDDA_BPC_SINGLE)
                        nr = 1;
-               if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
-                       nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+               if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+                       nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
 
                len = nr * CD_FRAMESIZE_RAW;
 
index 65e2d9759857d9b0ef06332b0412b836aee54d9c..e9a73bb242b0935eefae2ce0744ed97965a2143d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -510,7 +510,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
         *        combine_restrictions_low()
         */
        rs->max_sectors =
-               min_not_zero(rs->max_sectors, q->max_sectors);
+               min_not_zero(rs->max_sectors, queue_max_sectors(q));
 
        /*
         * Check if merge fn is supported.
@@ -525,25 +525,25 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 
        rs->max_phys_segments =
                min_not_zero(rs->max_phys_segments,
-                            q->max_phys_segments);
+                            queue_max_phys_segments(q));
 
        rs->max_hw_segments =
-               min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+               min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
 
        rs->logical_block_size = max(rs->logical_block_size,
                                     queue_logical_block_size(q));
 
        rs->max_segment_size =
-               min_not_zero(rs->max_segment_size, q->max_segment_size);
+               min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
 
        rs->max_hw_sectors =
-               min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+               min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
 
        rs->seg_boundary_mask =
                min_not_zero(rs->seg_boundary_mask,
-                            q->seg_boundary_mask);
+                            queue_segment_boundary(q));
 
-       rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+       rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
 
        rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
@@ -914,13 +914,13 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
         * restrictions.
         */
        blk_queue_max_sectors(q, t->limits.max_sectors);
-       q->max_phys_segments = t->limits.max_phys_segments;
-       q->max_hw_segments = t->limits.max_hw_segments;
-       q->logical_block_size = t->limits.logical_block_size;
-       q->max_segment_size = t->limits.max_segment_size;
-       q->max_hw_sectors = t->limits.max_hw_sectors;
-       q->seg_boundary_mask = t->limits.seg_boundary_mask;
-       q->bounce_pfn = t->limits.bounce_pfn;
+       blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+       blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+       blk_queue_logical_block_size(q, t->limits.logical_block_size);
+       blk_queue_max_segment_size(q, t->limits.max_segment_size);
+       blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+       blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+       blk_queue_bounce_limit(q, t->limits.bounce_pfn);
 
        if (t->limits.no_cluster)
                queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
index 7a36e38393a1e9ff24defd03c756138a9f5da903..64f1f3e046e088dc925e00881583484c5e7bc2cd 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -146,7 +146,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
                 * a one page request is never in violation.
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                disk->num_sectors = rdev->sectors;
index 41ced0cbe823c7275cc2f79172cc9913385fcd2f..4ee31aa13c4073faa2bbfb44f85cb9afd3c96624 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -303,7 +303,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                 * merge_bvec_fn will be involved in multipath.)
                 */
                        if (q->merge_bvec_fn &&
-                           mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                           queue_max_sectors(q) > (PAGE_SIZE>>9))
                                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        conf->working_disks++;
@@ -467,7 +467,7 @@ static int multipath_run (mddev_t *mddev)
                 * violating it, not that we ever expect a device with
                 * a merge_bvec_fn to be involved in multipath */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                if (!test_bit(Faulty, &rdev->flags))
index c08d7559be5531fb01bedb55e4663a0e40092607..925507e7d673babb32585684916976ad01d04025 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -144,7 +144,7 @@ static int create_strip_zones (mddev_t *mddev)
                 */
 
                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                if (!smallest || (rdev1->sectors < smallest->sectors))
index 36df9109cde18bb33aec2992afbfb00b1309b587..e23758b4a34e2b65c157be93903d8f416cba7cec 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1130,7 +1130,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                         * a one page request is never in violation.
                         */
                        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                           mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                           queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                                blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        p->head_position = 0;
@@ -1996,7 +1996,7 @@ static int run(mddev_t *mddev)
                 * a one page request is never in violation.
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
                        blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                disk->head_position = 0;
index 499620afb44b259184d3a04ba73fb5a1e7d2c724..750550c1166fe4725efec1935d38d7eaeb4779a4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1158,8 +1158,8 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
                         * a one page request is never in violation.
                         */
                        if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                           mddev->queue->max_sectors > (PAGE_SIZE>>9))
-                               mddev->queue->max_sectors = (PAGE_SIZE>>9);
+                           queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                               blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                        p->head_position = 0;
                        rdev->raid_disk = mirror;
@@ -2145,8 +2145,8 @@ static int run(mddev_t *mddev)
                 * a one page request is never in violation.
                 */
                if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-                   mddev->queue->max_sectors > (PAGE_SIZE>>9))
-                       mddev->queue->max_sectors = (PAGE_SIZE>>9);
+                   queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+                       blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
                disk->head_position = 0;
        }
index 4616bc3a6e713065e689943db4cdde421076d318..7970dc8c522eb0af058c2da573bcaa524384cd20 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3463,10 +3463,10 @@ static int bio_fits_rdev(struct bio *bi)
 {
        struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-       if ((bi->bi_size>>9) > q->max_sectors)
+       if ((bi->bi_size>>9) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
-       if (bi->bi_phys_segments > q->max_phys_segments)
+       if (bi->bi_phys_segments > queue_max_phys_segments(q))
                return 0;
 
        if (q->merge_bvec_fn)
index 0fc2c0ae7691b4b6f38672880ccfed3a7fbacea3..9bd407fa98e4f0e6c7053b407bd124d52bfe14a0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -289,8 +289,8 @@ sg_open(struct inode *inode, struct file *filp)
        if (list_empty(&sdp->sfds)) {   /* no existing opens on this device */
                sdp->sgdebug = 0;
                q = sdp->device->request_queue;
-               sdp->sg_tablesize = min(q->max_hw_segments,
-                                       q->max_phys_segments);
+               sdp->sg_tablesize = min(queue_max_hw_segments(q),
+                                       queue_max_phys_segments(q));
        }
        if ((sfp = sg_add_sfp(sdp, dev)))
                filp->private_data = sfp;
@@ -909,7 +909,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
                 if (val < 0)
                         return -EINVAL;
                val = min_t(int, val,
-                               sdp->device->request_queue->max_sectors * 512);
+                           queue_max_sectors(sdp->device->request_queue) * 512);
                if (val != sfp->reserve.bufflen) {
                        if (sg_res_in_use(sfp) || sfp->mmap_called)
                                return -EBUSY;
@@ -919,7 +919,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
                return 0;
        case SG_GET_RESERVED_SIZE:
                val = min_t(int, sfp->reserve.bufflen,
-                               sdp->device->request_queue->max_sectors * 512);
+                           queue_max_sectors(sdp->device->request_queue) * 512);
                return put_user(val, ip);
        case SG_SET_COMMAND_Q:
                result = get_user(val, ip);
@@ -1059,7 +1059,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
                        return -ENODEV;
                return scsi_ioctl(sdp->device, cmd_in, p);
        case BLKSECTGET:
-               return put_user(sdp->device->request_queue->max_sectors * 512,
+               return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
                                ip);
        case BLKTRACESETUP:
                return blk_trace_setup(sdp->device->request_queue,
@@ -1377,7 +1377,8 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
        sdp->device = scsidp;
        INIT_LIST_HEAD(&sdp->sfds);
        init_waitqueue_head(&sdp->o_excl_wait);
-       sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+       sdp->sg_tablesize = min(queue_max_hw_segments(q),
+                               queue_max_phys_segments(q));
        sdp->index = k;
        kref_init(&sdp->d_ref);
 
@@ -2055,7 +2056,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
                sg_big_buff = def_reserved_size;
 
        bufflen = min_t(int, sg_big_buff,
-                       sdp->device->request_queue->max_sectors * 512);
+                       queue_max_sectors(sdp->device->request_queue) * 512);
        sg_build_reserve(sfp, bufflen);
        SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
                           sfp->reserve.bufflen, sfp->reserve.k_use_sg));
index 8681b708344f0130265eba1e3a34456fd1258abe..89bd438e1fe30692e006a0bac18c18fc4c21cd86 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3983,8 +3983,8 @@ static int st_probe(struct device *dev)
                return -ENODEV;
        }
 
-       i = min(SDp->request_queue->max_hw_segments,
-               SDp->request_queue->max_phys_segments);
+       i = min(queue_max_hw_segments(SDp->request_queue),
+               queue_max_phys_segments(SDp->request_queue));
        if (st_max_sg_segs < i)
                i = st_max_sg_segs;
        buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
index 4ca3b586064375c939ae195e260221b519ed06d0..cfa26d56ce60c4f2c69346aae433444a5c812297 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -132,7 +132,7 @@ static int slave_configure(struct scsi_device *sdev)
 
                if (us->fflags & US_FL_MAX_SECTORS_MIN)
                        max_sectors = PAGE_CACHE_SIZE >> 9;
-               if (sdev->request_queue->max_sectors > max_sectors)
+               if (queue_max_sectors(sdev->request_queue) > max_sectors)
                        blk_queue_max_sectors(sdev->request_queue,
                                              max_sectors);
        } else if (sdev->type == TYPE_TAPE) {
@@ -483,7 +483,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
 {
        struct scsi_device *sdev = to_scsi_device(dev);
 
-       return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+       return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
 }
 
 /* Input routine for the sysfs max_sectors file */
index 4445c38217304b5eaf934c0c96c1defc92a67e98..ab423a1024ab8f012b4514ae0825b1b66f270d37 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -499,11 +499,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
        struct request_queue *q = bdev_get_queue(bdev);
        int nr_pages;
 
-       nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (nr_pages > q->max_phys_segments)
-               nr_pages = q->max_phys_segments;
-       if (nr_pages > q->max_hw_segments)
-               nr_pages = q->max_hw_segments;
+       nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (nr_pages > queue_max_phys_segments(q))
+               nr_pages = queue_max_phys_segments(q);
+       if (nr_pages > queue_max_hw_segments(q))
+               nr_pages = queue_max_hw_segments(q);
 
        return nr_pages;
 }
@@ -562,8 +562,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
         * make this too complex.
         */
 
-       while (bio->bi_phys_segments >= q->max_phys_segments
-              || bio->bi_phys_segments >= q->max_hw_segments) {
+       while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+              || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
 
                if (retried_segments)
                        return 0;
@@ -634,7 +634,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
                    unsigned int len, unsigned int offset)
 {
-       return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+       return __bio_add_page(q, bio, page, len, offset,
+                             queue_max_hw_sectors(q));
 }
 
 /**
@@ -654,7 +655,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
                 unsigned int offset)
 {
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-       return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+       return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
 
 struct bio_map_data {
index d30ec6f30dd7ee21cc7cb78bb06da62f513cd2df..12737be58601108f81f486fcdfac99c4875d8fb5 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-       __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+       __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
        BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
index 872b78b7a1014dc3038f3ecd567328f98e7624f8..29b48f7b4ba82c968ed96a8a5e5d80a9948bb257 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -898,6 +898,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
@@ -988,6 +989,41 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+       return q->bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+       return q->seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+       return q->max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+       return q->max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+       return q->max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+       return q->max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+       return q->max_segment_size;
+}
+
 static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
        int retval = 512;
index e590272fe7a8f3e40acb21059bb0082f74354f26..8dcd4315e01c03e46f40fb10866b483c6829df08 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -192,7 +192,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                /*
                 * is destination page below bounce pfn?
                 */
-               if (page_to_pfn(page) <= q->bounce_pfn)
+               if (page_to_pfn(page) <= queue_bounce_pfn(q))
                        continue;
 
                /*
@@ -284,7 +284,7 @@ void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
         * don't waste time iterating over bio segments
         */
        if (!(q->bounce_gfp & GFP_DMA)) {
-               if (q->bounce_pfn >= blk_max_pfn)
+               if (queue_bounce_pfn(q) >= blk_max_pfn)
                        return;
                pool = page_pool;
        } else {