bio->bi_sector = sector;
- if (nr_sects > q->max_hw_sectors) {
- bio->bi_size = q->max_hw_sectors << 9;
- nr_sects -= q->max_hw_sectors;
- sector += q->max_hw_sectors;
+ if (nr_sects > queue_max_hw_sectors(q)) {
+ bio->bi_size = queue_max_hw_sectors(q) << 9;
+ nr_sects -= queue_max_hw_sectors(q);
+ sector += queue_max_hw_sectors(q);
} else {
bio->bi_size = nr_sects << 9;
nr_sects = 0;
goto end_io;
}
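As a worked example (assuming queue_max_hw_sectors(q) returns 1024 and the surrounding loop keeps issuing bios until nr_sects reaches zero), a 3000-sector range is split into bios of 1024, 1024 and 952 sectors: the first two passes take the if branch, trimming nr_sects and advancing sector by 1024 each time, and the remaining 952 sectors fall into the else branch, which zeroes nr_sects.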
- if (unlikely(nr_sectors > q->max_hw_sectors)) {
+ if (unlikely(nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
- bdevname(bio->bi_bdev, b),
- bio_sectors(bio),
- q->max_hw_sectors);
+ bdevname(bio->bi_bdev, b),
+ bio_sectors(bio),
+ queue_max_hw_sectors(q));
goto end_io;
}
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (blk_rq_sectors(rq) > q->max_sectors ||
- blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
+ if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
+ blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
* limitation.
*/
blk_recalc_rq_segments(rq);
- if (rq->nr_phys_segments > q->max_phys_segments ||
- rq->nr_phys_segments > q->max_hw_segments) {
+ if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
+ rq->nr_phys_segments > queue_max_hw_segments(q)) {
printk(KERN_ERR "%s: over max segments limit.\n", __func__);
return -EIO;
}
struct bio *bio = NULL;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
struct bio *bio;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
* never considered part of another segment, since that
* might change with the bounce page.
*/
- high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
+ high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
if (high || highprv)
goto new_segment;
if (cluster) {
- if (seg_size + bv->bv_len > q->max_segment_size)
+ if (seg_size + bv->bv_len
+ > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
goto new_segment;
return 0;
if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
- q->max_segment_size)
+ queue_max_segment_size(q))
return 0;
if (!bio_has_data(bio))
int nbytes = bvec->bv_len;
if (bvprv && cluster) {
- if (sg->length + nbytes > q->max_segment_size)
+ if (sg->length + nbytes > queue_max_segment_size(q))
goto new_segment;
if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
{
int nr_phys_segs = bio_phys_segments(q, bio);
- if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
- || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+ if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
+ req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
req->cmd_flags |= REQ_NOMERGE;
unsigned short max_sectors;
if (unlikely(blk_pc_request(req)))
- max_sectors = q->max_hw_sectors;
+ max_sectors = queue_max_hw_sectors(q);
else
- max_sectors = q->max_sectors;
+ max_sectors = queue_max_sectors(q);
if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
/*
* Will it become too large?
*/
- if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
total_phys_segments--;
}
- if (total_phys_segments > q->max_phys_segments)
+ if (total_phys_segments > queue_max_phys_segments(q))
return 0;
- if (total_phys_segments > q->max_hw_segments)
+ if (total_phys_segments > queue_max_hw_segments(q))
return 0;
/* Merge is OK... */
}
EXPORT_SYMBOL(blk_queue_max_sectors);
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+ if (BLK_DEF_MAX_SECTORS > max_sectors)
+ q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+ else
+ q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
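A minimal usage sketch of the new setter (the driver name and the 16384-sector limit are made up for illustration); note that, as written above, a requested limit below BLK_DEF_MAX_SECTORS is rounded up to that default:

#include <linux/blkdev.h>

/*
 * Hypothetical driver init sketch -- "my_dev" and the 16384-sector
 * limit are illustrative, not part of this patch.
 */
static void my_dev_init_queue(struct request_queue *q)
{
	/* 16384 sectors * 512 bytes = 8 MB maximum per request */
	blk_queue_max_hw_sectors(q, 16384);

	/* readers now go through the accessor, not q->max_hw_sectors */
	printk(KERN_INFO "my_dev: max_hw_sectors = %u\n",
	       queue_max_hw_sectors(q));
}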
/**
* blk_queue_max_phys_segments - set max phys segments for a request for this queue
* @q: the request queue for the device
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
- if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+ if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
return -EINVAL;
/* make room for appending the drain */
- --q->max_hw_segments;
- --q->max_phys_segments;
+ blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+ blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
q->dma_drain_needed = dma_drain_needed;
q->dma_drain_buffer = buf;
q->dma_drain_size = size;
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
- int max_sectors_kb = q->max_sectors >> 1;
+ int max_sectors_kb = queue_max_sectors(q) >> 1;
return queue_var_show(max_sectors_kb, (page));
}
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
- max_hw_sectors_kb = q->max_hw_sectors >> 1,
+ max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
return -EINVAL;
spin_lock_irq(q->queue_lock);
- q->max_sectors = max_sectors_kb << 1;
+ blk_queue_max_sectors(q, max_sectors_kb << 1);
spin_unlock_irq(q->queue_lock);
return ret;
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
- int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+ int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
return queue_var_show(max_hw_sectors_kb, (page));
}
return compat_put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
return compat_put_ushort(arg,
- bdev_get_queue(bdev)->max_sectors);
+ queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET: /* compatible, but no compat_ptr (!) */
case BLKFRASET:
if (!capable(CAP_SYS_ADMIN))
bio->bi_private = &wait;
bio->bi_sector = start;
- if (len > q->max_hw_sectors) {
- bio->bi_size = q->max_hw_sectors << 9;
- len -= q->max_hw_sectors;
- start += q->max_hw_sectors;
+ if (len > queue_max_hw_sectors(q)) {
+ bio->bi_size = queue_max_hw_sectors(q) << 9;
+ len -= queue_max_hw_sectors(q);
+ start += queue_max_hw_sectors(q);
} else {
bio->bi_size = len << 9;
len = 0;
case BLKSSZGET: /* get block device hardware sector size */
return put_int(arg, bdev_logical_block_size(bdev));
case BLKSECTGET:
- return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+ return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev)));
case BLKRASET:
case BLKFRASET:
if(!capable(CAP_SYS_ADMIN))
static int sg_get_reserved_size(struct request_queue *q, int __user *p)
{
- unsigned val = min(q->sg_reserved_size, q->max_sectors << 9);
+ unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9);
return put_user(val, p);
}
if (size < 0)
return -EINVAL;
- if (size > (q->max_sectors << 9))
- size = q->max_sectors << 9;
+ if (size > (queue_max_sectors(q) << 9))
+ size = queue_max_sectors(q) << 9;
q->sg_reserved_size = size;
return 0;
if (hdr->cmd_len > BLK_MAX_CDB)
return -EINVAL;
- if (hdr->dxfer_len > (q->max_hw_sectors << 9))
+ if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
return -EIO;
if (hdr->dxfer_len)
*/
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
- if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+ if ((pd->settings.size << 9) / CD_FRAMESIZE
+ <= queue_max_phys_segments(q)) {
/*
* The cdrom device can handle one segment/frame
*/
clear_bit(PACKET_MERGE_SEGS, &pd->flags);
return 0;
- } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+ } else if ((pd->settings.size << 9) / PAGE_SIZE
+ <= queue_max_phys_segments(q)) {
/*
* We can handle this case at the expense of some extra memory
* copies during write operations
nr = nframes;
if (cdi->cdda_method == CDDA_BPC_SINGLE)
nr = 1;
- if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9))
- nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW;
+ if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9))
+ nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW;
len = nr * CD_FRAMESIZE_RAW;
* combine_restrictions_low()
*/
rs->max_sectors =
- min_not_zero(rs->max_sectors, q->max_sectors);
+ min_not_zero(rs->max_sectors, queue_max_sectors(q));
/*
* Check if merge fn is supported.
rs->max_phys_segments =
min_not_zero(rs->max_phys_segments,
- q->max_phys_segments);
+ queue_max_phys_segments(q));
rs->max_hw_segments =
- min_not_zero(rs->max_hw_segments, q->max_hw_segments);
+ min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q));
rs->logical_block_size = max(rs->logical_block_size,
queue_logical_block_size(q));
rs->max_segment_size =
- min_not_zero(rs->max_segment_size, q->max_segment_size);
+ min_not_zero(rs->max_segment_size, queue_max_segment_size(q));
rs->max_hw_sectors =
- min_not_zero(rs->max_hw_sectors, q->max_hw_sectors);
+ min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q));
rs->seg_boundary_mask =
min_not_zero(rs->seg_boundary_mask,
- q->seg_boundary_mask);
+ queue_segment_boundary(q));
- rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+ rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q));
rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
}
* restrictions.
*/
blk_queue_max_sectors(q, t->limits.max_sectors);
- q->max_phys_segments = t->limits.max_phys_segments;
- q->max_hw_segments = t->limits.max_hw_segments;
- q->logical_block_size = t->limits.logical_block_size;
- q->max_segment_size = t->limits.max_segment_size;
- q->max_hw_sectors = t->limits.max_hw_sectors;
- q->seg_boundary_mask = t->limits.seg_boundary_mask;
- q->bounce_pfn = t->limits.bounce_pfn;
+ blk_queue_max_phys_segments(q, t->limits.max_phys_segments);
+ blk_queue_max_hw_segments(q, t->limits.max_hw_segments);
+ blk_queue_logical_block_size(q, t->limits.logical_block_size);
+ blk_queue_max_segment_size(q, t->limits.max_segment_size);
+ blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
+ blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
+ blk_queue_bounce_limit(q, t->limits.bounce_pfn);
if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->num_sectors = rdev->sectors;
* merge_bvec_fn will be involved in multipath.)
*/
if (q->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(q) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
conf->working_disks++;
* violating it, not that we ever expect a device with
* a merge_bvec_fn to be involved in multipath */
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!test_bit(Faulty, &rdev->flags))
*/
if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
if (!smallest || (rdev1->sectors < smallest->sectors))
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
p->head_position = 0;
rdev->raid_disk = mirror;
* a one page request is never in violation.
*/
if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
- mddev->queue->max_sectors > (PAGE_SIZE>>9))
- mddev->queue->max_sectors = (PAGE_SIZE>>9);
+ queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
+ blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
disk->head_position = 0;
}
{
struct request_queue *q = bdev_get_queue(bi->bi_bdev);
- if ((bi->bi_size>>9) > q->max_sectors)
+ if ((bi->bi_size>>9) > queue_max_sectors(q))
return 0;
blk_recount_segments(q, bi);
- if (bi->bi_phys_segments > q->max_phys_segments)
+ if (bi->bi_phys_segments > queue_max_phys_segments(q))
return 0;
if (q->merge_bvec_fn)
if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
- sdp->sg_tablesize = min(q->max_hw_segments,
- q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
}
if ((sfp = sg_add_sfp(sdp, dev)))
filp->private_data = sfp;
if (val < 0)
return -EINVAL;
val = min_t(int, val,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
if (val != sfp->reserve.bufflen) {
if (sg_res_in_use(sfp) || sfp->mmap_called)
return -EBUSY;
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
return put_user(val, ip);
case SG_SET_COMMAND_Q:
result = get_user(val, ip);
return -ENODEV;
return scsi_ioctl(sdp->device, cmd_in, p);
case BLKSECTGET:
- return put_user(sdp->device->request_queue->max_sectors * 512,
+ return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
ip);
case BLKTRACESETUP:
return blk_trace_setup(sdp->device->request_queue,
sdp->device = scsidp;
INIT_LIST_HEAD(&sdp->sfds);
init_waitqueue_head(&sdp->o_excl_wait);
- sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+ sdp->sg_tablesize = min(queue_max_hw_segments(q),
+ queue_max_phys_segments(q));
sdp->index = k;
kref_init(&sdp->d_ref);
sg_big_buff = def_reserved_size;
bufflen = min_t(int, sg_big_buff,
- sdp->device->request_queue->max_sectors * 512);
+ queue_max_sectors(sdp->device->request_queue) * 512);
sg_build_reserve(sfp, bufflen);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
sfp->reserve.bufflen, sfp->reserve.k_use_sg));
return -ENODEV;
}
- i = min(SDp->request_queue->max_hw_segments,
- SDp->request_queue->max_phys_segments);
+ i = min(queue_max_hw_segments(SDp->request_queue),
+ queue_max_phys_segments(SDp->request_queue));
if (st_max_sg_segs < i)
i = st_max_sg_segs;
buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (sdev->request_queue->max_sectors > max_sectors)
+ if (queue_max_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", sdev->request_queue->max_sectors);
+ return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
struct request_queue *q = bdev_get_queue(bdev);
int nr_pages;
- nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (nr_pages > q->max_phys_segments)
- nr_pages = q->max_phys_segments;
- if (nr_pages > q->max_hw_segments)
- nr_pages = q->max_hw_segments;
+ nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (nr_pages > queue_max_phys_segments(q))
+ nr_pages = queue_max_phys_segments(q);
+ if (nr_pages > queue_max_hw_segments(q))
+ nr_pages = queue_max_hw_segments(q);
return nr_pages;
}
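Worked through with illustrative numbers (not taken from the patch): with queue_max_sectors(q) = 1024 and 4 KB pages, nr_pages = ((1024 << 9) + 4095) >> 12 = 128, which is then clamped to whichever of queue_max_phys_segments(q) and queue_max_hw_segments(q) is smaller; if both are 128, the result stays 128.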
* make this too complex.
*/
- while (bio->bi_phys_segments >= q->max_phys_segments
- || bio->bi_phys_segments >= q->max_hw_segments) {
+ while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+ || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
if (retried_segments)
return 0;
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
unsigned int len, unsigned int offset)
{
- return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+ return __bio_add_page(q, bio, page, len, offset,
+ queue_max_hw_sectors(q));
}
/**
unsigned int offset)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
- return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+ return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
struct bio_map_data {
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+ return q->bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+ return q->seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+ return q->max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+ return q->max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+ return q->max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+ return q->max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+ return q->max_segment_size;
+}
+
static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
int retval = 512;
/*
* is destination page below bounce pfn?
*/
- if (page_to_pfn(page) <= q->bounce_pfn)
+ if (page_to_pfn(page) <= queue_bounce_pfn(q))
continue;
/*
* don't waste time iterating over bio segments
*/
if (!(q->bounce_gfp & GFP_DMA)) {
- if (q->bounce_pfn >= blk_max_pfn)
+ if (queue_bounce_pfn(q) >= blk_max_pfn)
return;
pool = page_pool;
} else {
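Taken together, converting other users of these fields is mechanical; a minimal sketch (the helper name is illustrative, and the shift mirrors the << 9 sector-to-byte conversions used throughout the patch):

#include <linux/blkdev.h>

/* Illustrative helper, not part of the patch: the largest request
 * payload the queue accepts, in bytes.  Before this conversion it
 * would have read q->max_sectors << 9 directly. */
static unsigned int example_max_request_bytes(struct request_queue *q)
{
	return queue_max_sectors(q) << 9;
}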