}
EXPORT_SYMBOL(blk_queue_bounce_limit);
-/**
- * blk_queue_bounce_pfn - set the bounce buffer limit for queue
- * @q: the request queue for the device
- * @pfn: max address
- *
- * Description:
- * This function is similar to blk_queue_bounce_limit except it
- * neither changes allocation flags, nor does it set up the ISA DMA
- * pool. This function should only be used by stacking drivers.
- * Hardware drivers should use blk_queue_bounce_limit instead.
- */
-void blk_queue_bounce_pfn(struct request_queue *q, u64 pfn)
-{
- q->limits.bounce_pfn = pfn;
-}
-EXPORT_SYMBOL(blk_queue_bounce_pfn);
-
/**
* blk_queue_max_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
blk_queue_max_segment_size(q, t->limits.max_segment_size);
blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors);
blk_queue_segment_boundary(q, t->limits.seg_boundary_mask);
- blk_queue_bounce_pfn(q, t->limits.bounce_pfn);
+ blk_queue_bounce_limit(q, t->limits.bounce_pfn);
if (t->limits.no_cluster)
queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_bounce_pfn(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
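
For context, callers that previously used blk_queue_bounce_pfn() now go through blk_queue_bounce_limit() directly, as the dm-table.c hunk above shows. Below is a minimal sketch of a hardware driver declaring its addressing limit with the remaining interface; the function name example_declare_dma_limit and the 32-bit mask are illustrative assumptions, not taken from this patch.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical example: with blk_queue_bounce_pfn() removed,
 * blk_queue_bounce_limit() is the single interface for telling the
 * block layer how high the device can address; pages above the limit
 * get bounce-buffered.
 */
static void example_declare_dma_limit(struct request_queue *q)
{
	/* Assume a device that can only DMA to 32-bit addresses. */
	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
}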