block: do not pass disk names as format strings
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / block / blk-settings.c
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O; it can be used to build a
 * cdb from the request data, for instance.
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
        q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

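/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * SCSI-like driver could allocate a per-request command structure in its
 * prepare_request callback and free it again in an unprepare callback.
 * The mydrv_* names and struct mydrv_cmd are assumptions made up for this
 * example; BLKPREP_DEFER tells the block layer to retry the request later.
 *
 *      static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
 *      {
 *              struct mydrv_cmd *cmd;
 *
 *              if (rq->special)
 *                      return BLKPREP_OK;
 *
 *              cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
 *              if (!cmd)
 *                      return BLKPREP_DEFER;
 *
 *              rq->special = cmd;
 *              return BLKPREP_OK;
 *      }
 *
 *      blk_queue_prep_rq(q, mydrv_prep_rq);
 *      blk_queue_unprep_rq(q, mydrv_unprep_rq);
 */
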
/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
        q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
        q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

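/*
 * Usage sketch (illustrative, not part of this file): a hypothetical
 * striping driver might refuse to let a bio grow across a 64 KiB chunk
 * boundary.  mydrv_merge_bvec() and the chunk size are assumptions for
 * this example; the callback returns how many bytes of @biovec it can
 * accept at the proposed offset, and always accepts at least one page on
 * an otherwise empty bio.
 *
 *      static int mydrv_merge_bvec(struct request_queue *q,
 *                                  struct bvec_merge_data *bvm,
 *                                  struct bio_vec *biovec)
 *      {
 *              unsigned int chunk = 64 * 1024;
 *              unsigned int offset = ((bvm->bi_sector << 9) + bvm->bi_size) & (chunk - 1);
 *              unsigned int room = chunk - offset;
 *
 *              if (!bvm->bi_size)
 *                      return biovec->bv_len;
 *
 *              return min(room, (unsigned int)biovec->bv_len);
 *      }
 *
 *      blk_queue_merge_bvec(q, mydrv_merge_bvec);
 */
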
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
        q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
        q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
        q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
        lim->max_segments = BLK_MAX_SEGMENTS;
        lim->max_integrity_segments = 0;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
        lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        lim->max_write_same_sectors = 0;
        lim->max_discard_sectors = 0;
        lim->discard_granularity = 0;
        lim->discard_alignment = 0;
        lim->discard_misaligned = 0;
        lim->discard_zeroes_data = 0;
        lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
        lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
        lim->alignment_offset = 0;
        lim->io_opt = 0;
        lim->misaligned = 0;
        lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        blk_set_default_limits(lim);

        /* Inherit limits from component devices */
        lim->discard_zeroes_data = 1;
        lim->max_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
        /*
         * set defaults
         */
        q->nr_requests = BLKDEV_MAX_RQ;

        q->make_request_fn = mfn;
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;

        blk_set_default_limits(&q->limits);

        /*
         * by default assume old behaviour and bounce for any highmem page
         */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);

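/*
 * Usage sketch (illustrative, not part of this file): a bio-based driver
 * such as a ramdisk or a stacking target typically allocates its own queue
 * and installs a make_request function instead of using the request_fn
 * path.  mydrv_make_request() and mydrv_handle_bio() are assumed names;
 * on this kernel the callback returns void and must end the bio itself.
 *
 *      static void mydrv_make_request(struct request_queue *q, struct bio *bio)
 *      {
 *              mydrv_handle_bio(q->queuedata, bio);
 *              bio_endio(bio, 0);
 *      }
 *
 *      q = blk_alloc_queue(GFP_KERNEL);
 *      blk_queue_make_request(q, mydrv_make_request);
 */
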
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @dma_mask: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @dma_mask.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
{
        unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;

        q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
        /*
         * Assume anything <= 4GB can be handled by IOMMU.  Actually
         * some IOMMUs can handle everything, but I don't know of a
         * way to test this here.
         */
        if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
        if (b_pfn < blk_max_low_pfn)
                dma = 1;
        q->limits.bounce_pfn = b_pfn;
#endif
        if (dma) {
                init_emergency_isa_pool();
                q->bounce_gfp = GFP_NOIO | GFP_DMA;
                q->limits.bounce_pfn = b_pfn;
        }
}
EXPORT_SYMBOL(blk_queue_bounce_limit);

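/*
 * Usage sketch (illustrative, not part of this file): a controller that can
 * only address the low 32 bits would ask the block layer to bounce pages
 * above 4GB (first call), while a device with no addressing restriction can
 * effectively disable bouncing (second call):
 *
 *      blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
 *      blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
 */
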
/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
        if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
                max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_hw_sectors);
        }

        limits->max_hw_sectors = max_hw_sectors;
        limits->max_sectors = min_t(unsigned int, max_hw_sectors,
                                    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

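/*
 * Worked example (illustrative): a controller that can transfer at most
 * 1 MiB per command would register 1 MiB / 512 = 2048 sectors.  The hard
 * limit becomes 2048, the soft limit is capped at BLK_DEF_MAX_SECTORS, and
 * the soft limit can later be lowered further through
 * /sys/block/<device>/queue/max_sectors_kb.
 *
 *      blk_queue_max_hw_sectors(q, 2048);
 */
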
/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
                                      unsigned int max_write_same_sectors)
{
        q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
                printk(KERN_INFO "%s: set to minimum %d\n",
                       __func__, max_size);
        }

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);

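/*
 * Usage sketch (illustrative, not part of this file): a DMA engine with a
 * 32-entry scatter-gather table whose entries each cover at most 64 KiB
 * would be described with both limits together:
 *
 *      blk_queue_max_segments(q, 32);
 *      blk_queue_max_segment_size(q, 64 * 1024);
 */
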
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
        q->limits.logical_block_size = size;

        if (q->limits.physical_block_size < size)
                q->limits.physical_block_size = size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

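/*
 * Usage sketch (illustrative, not part of this file): a 512-byte emulated
 * ("512e") drive with 4 KiB physical sectors would be described as:
 *
 *      blk_queue_logical_block_size(q, 512);
 *      blk_queue_physical_block_size(q, 4096);
 *
 * As a side effect io_min is raised to 4096, steering well-behaved users
 * towards physical-block-sized writes that avoid read-modify-write.
 */
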
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

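/*
 * Worked example (illustrative): with a 4096-byte physical block size,
 * blk_queue_alignment_offset(q, 3584) records an offset of 3584 bytes
 * (3584 & 4095 == 3584), while passing any multiple of 4096 records 0,
 * i.e. the device is naturally aligned.
 */
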
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);

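/*
 * Usage sketch (illustrative, not part of this file): a RAID5-style array
 * with a 64 KiB chunk and four data disks would advertise the chunk as the
 * minimum and the full stripe as the optimal I/O size:
 *
 *      blk_limits_io_min(&limits, 64 * 1024);
 *      blk_limits_io_opt(&limits, 4 * 64 * 1024);
 */
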
/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
        blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_write_same_sectors = min(t->max_write_same_sectors,
                                        b->max_write_same_sectors);
        t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) & (min(top, bottom) - 1)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm(t->io_opt, b->io_opt);

        t->cluster &= b->cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm(t->alignment_offset, alignment)
                & (max(t->physical_block_size, t->io_min) - 1);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }

        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

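/*
 * Worked example (illustrative): stacking a 512n component (logical and
 * physical block size 512) with a 512e component (logical 512, physical
 * 4096, io_min 4096) leaves the top limits with logical_block_size 512,
 * physical_block_size 4096 and io_min 4096, i.e. the stricter of the two,
 * since each value is combined with max().  Limits such as max_sectors and
 * max_segments instead shrink towards the most restrictive component via
 * min_not_zero().
 */
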
/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
                      sector_t start)
{
        struct request_queue *bq = bdev_get_queue(bdev);

        start += get_start_sect(bdev);

        return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
                       sector_t offset)
{
        struct request_queue *t = disk->queue;

        if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
                char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

                disk_name(disk, 0, top);
                bdevname(bdev, bottom);

                printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
                       top, bottom);
        }
}
EXPORT_SYMBOL(disk_stack_limits);

/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
        q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
                        dma_drain_needed_fn *dma_drain_needed,
                        void *buf, unsigned int size)
{
        if (queue_max_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
        blk_queue_max_segments(q, queue_max_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;

        return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);

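/*
 * Usage sketch (illustrative, not part of this file): an ATAPI-style driver
 * might drain only packet commands into a private buffer.  The mydrv_*
 * names and MYDRV_DRAIN_SIZE are assumptions for this example; the callback
 * returns non-zero when the drain buffer should be appended for a request.
 *
 *      static int mydrv_drain_needed(struct request *rq)
 *      {
 *              return rq->cmd_type == REQ_TYPE_BLOCK_PC;
 *      }
 *
 *      buf = kmalloc(MYDRV_DRAIN_SIZE, GFP_KERNEL);
 *      blk_queue_dma_drain(q, mydrv_drain_needed, buf, MYDRV_DRAIN_SIZE);
 */
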
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
                printk(KERN_INFO "%s: set to minimum %lx\n",
                       __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

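/*
 * Usage sketch (illustrative, not part of this file): a DMA engine whose
 * scatter-gather segments must not cross a 64 KiB boundary would be
 * described with:
 *
 *      blk_queue_segment_boundary(q, 0xffff);
 */
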
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->dma_alignment)
                q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q:		the request queue for the device
 * @flush:	0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q.  If it supports
 * flushing, REQ_FLUSH should be set.  If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
        WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

        if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
                flush &= ~REQ_FUA;

        q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);

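/*
 * Usage sketch (illustrative, not part of this file): a driver for a device
 * with a volatile write cache that also honours FUA writes would declare
 * both capabilities (first call); a device with no write cache would pass 0
 * and the block layer then completes flush requests without issuing them
 * to the driver (second call):
 *
 *      blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 *      blk_queue_flush(q, 0);
 */
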
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
        q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
        blk_max_low_pfn = max_low_pfn - 1;
        blk_max_pfn = max_pfn - 1;
        return 0;
}
subsys_initcall(blk_settings_init);