block: Use accessor functions for queue limits
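The point of the change is that drivers and stacking code read and update queue limits through wrapper functions instead of poking the struct request_queue fields directly. A rough before/after sketch of the calling convention, using only the helpers that appear in this patch (hypothetical caller context):

    /* old style: fields were touched directly */
    if (q->max_hw_segments < 2)
            return -EINVAL;
    --q->max_hw_segments;

    /* new style: go through the accessors */
    if (queue_max_hw_segments(q) < 2)
            return -EINVAL;
    blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);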
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 69c42adde52bafb61913d127b9d867e69e45a0c4..0b32f984eed24ffdfd391587367428be63a83253 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-       blk_queue_hardsect_size(q, 512);
+       blk_queue_logical_block_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;
@@ -156,26 +156,28 @@ EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q:  the request queue for the device
- * @dma_addr:   bus address limit
+ * @q: the request queue for the device
+ * @dma_mask: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_addr.
+ *    buffers for doing I/O to pages residing above @dma_mask.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 {
-       unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
+       unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;
 
        q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
-       /* Assume anything <= 4GB can be handled by IOMMU.
-          Actually some IOMMUs can handle everything, but I don't
-          know of a way to test this here. */
-       if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+       /*
+        * Assume anything <= 4GB can be handled by IOMMU.  Actually
+        * some IOMMUs can handle everything, but I don't know of a
+        * way to test this here.
+        */
+       if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
 #else
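As a usage note (a sketch, not part of the patch): callers pass the highest bus address the device can reach, which is why the 64-bit check above now compares against the mask value 0xffffffff rather than the size 0x100000000. A driver limited to 32-bit DMA would do something like:

    #include <linux/dma-mapping.h>

    /* bounce any page above the 32-bit boundary; DMA_BIT_MASK(32) == 0xffffffffULL */
    blk_queue_bounce_limit(q, DMA_BIT_MASK(32));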
@@ -217,6 +219,15 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 }
 EXPORT_SYMBOL(blk_queue_max_sectors);
 
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
+{
+       if (BLK_DEF_MAX_SECTORS > max_sectors)
+               q->max_hw_sectors = BLK_DEF_MAX_SECTORS;
+       else
+               q->max_hw_sectors = max_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_hw_sectors);
+
 /**
  * blk_queue_max_phys_segments - set max phys segments for a request for this queue
  * @q:  the request queue for the device
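The new setter records the controller's per-request transfer limit and never lets max_hw_sectors drop below BLK_DEF_MAX_SECTORS. A plausible call site (the 1024-sector figure is purely illustrative):

    /* hypothetical controller that can move up to 1024 sectors (512 KB) per request */
    blk_queue_max_hw_sectors(q, 1024);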
@@ -286,21 +297,20 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
- * blk_queue_hardsect_size - set hardware sector size for the queue
+ * blk_queue_logical_block_size - set logical block size for the queue
  * @q:  the request queue for the device
- * @size:  the hardware sector size, in bytes
+ * @size:  the logical block size, in bytes
  *
  * Description:
- *   This should typically be set to the lowest possible sector size
- *   that the hardware can operate on (possible without reverting to
- *   even internal read-modify-write operations). Usually the default
- *   of 512 covers most hardware.
+ *   This should be set to the lowest possible block size that the
+ *   storage device can address.  The default of 512 covers most
+ *   hardware.
  **/
-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
-       q->hardsect_size = size;
+       q->logical_block_size = size;
 }
-EXPORT_SYMBOL(blk_queue_hardsect_size);
+EXPORT_SYMBOL(blk_queue_logical_block_size);
 
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
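With the rename in place, a driver for hardware that addresses storage in 4 KiB units would report it as below; the 512-byte default set in blk_queue_make_request() continues to cover ordinary 512-byte-sector devices (sketch):

    /* device uses 4096-byte logical blocks */
    blk_queue_logical_block_size(q, 4096);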
@@ -322,7 +332,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
        t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
        t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
        t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
-       t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
+       t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
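blk_queue_stack_limits() folds a bottom device's limits into the stacked queue: segment counts and sizes take the smaller non-zero value, while the logical block size takes the larger of the two. A stacking driver (MD/DM style, sketched here) calls it once per member device:

    /* t is the stacked (top) queue, bdev is one member device */
    blk_queue_stack_limits(t, bdev_get_queue(bdev));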
@@ -394,11 +404,11 @@ int blk_queue_dma_drain(struct request_queue *q,
                               dma_drain_needed_fn *dma_drain_needed,
                               void *buf, unsigned int size)
 {
-       if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
+       if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
                return -EINVAL;
        /* make room for appending the drain */
-       --q->max_hw_segments;
-       --q->max_phys_segments;
+       blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
+       blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
        q->dma_drain_needed = dma_drain_needed;
        q->dma_drain_buffer = buf;
        q->dma_drain_size = size;
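blk_queue_dma_drain() itself is called once at queue setup by drivers whose hardware may transfer more data than the request asked for (ATAPI being the classic case), so a scratch segment can be appended when needed. A sketch with placeholder names (my_drain_needed, drain_buf):

    /* placeholder callback: decide per request whether the drain segment is needed */
    static int my_drain_needed(struct request *rq)
    {
            return 1;
    }

    /* at queue setup: reserve one segment and register a 256-byte drain buffer */
    ret = blk_queue_dma_drain(q, my_drain_needed, drain_buf, 256);
    if (ret)
            return ret;     /* fails if the queue cannot spare a segment */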