block: Do away with the notion of hardsect_size
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 69c42adde52bafb61913d127b9d867e69e45a0c4..15c3164537b8fbd75bcfb306167ca7d6dbd0a675 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -134,7 +134,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-       blk_queue_hardsect_size(q, 512);
+       blk_queue_logical_block_size(q, 512);
        blk_queue_dma_alignment(q, 511);
        blk_queue_congestion_threshold(q);
        q->nr_batching = BLK_BATCH_REQ;
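
With the rename, a driver's queue setup changes only in the setter's name: blk_init_queue() still applies the 512-byte default via blk_queue_make_request() as shown above, and a driver whose device addresses larger blocks overrides it afterwards. A minimal sketch of that probe path, assuming a hypothetical example_dev and example_request_fn; only the setter itself comes from this patch:

	#include <linux/blkdev.h>

	static int example_init_queue(struct example_dev *dev)	/* hypothetical */
	{
		struct request_queue *q;

		q = blk_init_queue(example_request_fn, &dev->lock);
		if (!q)
			return -ENOMEM;

		/* was: blk_queue_hardsect_size(q, dev->sector_size); */
		blk_queue_logical_block_size(q, dev->sector_size);

		dev->queue = q;
		return 0;
	}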
@@ -156,26 +156,28 @@ EXPORT_SYMBOL(blk_queue_make_request);
 
 /**
  * blk_queue_bounce_limit - set bounce buffer limit for queue
- * @q:  the request queue for the device
- * @dma_addr:   bus address limit
+ * @q: the request queue for the device
+ * @dma_mask: the maximum address the device can handle
  *
  * Description:
  *    Different hardware can have different requirements as to what pages
  *    it can do I/O directly to. A low level driver can call
  *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
- *    buffers for doing I/O to pages residing above @dma_addr.
+ *    buffers for doing I/O to pages residing above @dma_mask.
  **/
-void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
+void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 {
-       unsigned long b_pfn = dma_addr >> PAGE_SHIFT;
+       unsigned long b_pfn = dma_mask >> PAGE_SHIFT;
        int dma = 0;
 
        q->bounce_gfp = GFP_NOIO;
 #if BITS_PER_LONG == 64
-       /* Assume anything <= 4GB can be handled by IOMMU.
-          Actually some IOMMUs can handle everything, but I don't
-          know of a way to test this here. */
-       if (b_pfn < (min_t(u64, 0x100000000UL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+       /*
+        * Assume anything <= 4GB can be handled by IOMMU.  Actually
+        * some IOMMUs can handle everything, but I don't know of a
+        * way to test this here.
+        */
+       if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
 #else
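
The constant change from 0x100000000UL to 0xffffffffUL follows from treating the argument as a mask: @dma_mask names the highest address the device can reach, so the 4GB boundary is the mask 0xffffffff, not the byte count one past it. A hedged sketch of how a caller passes such a mask, using DMA_BIT_MASK() and BLK_BOUNCE_ANY from the existing kernel headers; the wrapper function is hypothetical:

	#include <linux/blkdev.h>
	#include <linux/dma-mapping.h>
	#include <linux/types.h>

	/* Hypothetical helper: pick the bounce limit from the device's reach. */
	static void example_set_bounce_limit(struct request_queue *q, bool dma32_only)
	{
		if (dma32_only)
			/* device DMAs only below 4GB: bounce pages above the mask */
			blk_queue_bounce_limit(q, DMA_BIT_MASK(32));
		else
			/* no addressing restriction: never allocate bounce buffers */
			blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	}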
@@ -286,21 +288,20 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 EXPORT_SYMBOL(blk_queue_max_segment_size);
 
 /**
- * blk_queue_hardsect_size - set hardware sector size for the queue
+ * blk_queue_logical_block_size - set logical block size for the queue
  * @q:  the request queue for the device
- * @size:  the hardware sector size, in bytes
+ * @size:  the logical block size, in bytes
  *
  * Description:
- *   This should typically be set to the lowest possible sector size
- *   that the hardware can operate on (possible without reverting to
- *   even internal read-modify-write operations). Usually the default
- *   of 512 covers most hardware.
+ *   This should be set to the lowest possible block size that the
+ *   storage device can address.  The default of 512 covers most
+ *   hardware.
  **/
-void blk_queue_hardsect_size(struct request_queue *q, unsigned short size)
+void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
 {
-       q->hardsect_size = size;
+       q->logical_block_size = size;
 }
-EXPORT_SYMBOL(blk_queue_hardsect_size);
+EXPORT_SYMBOL(blk_queue_logical_block_size);
 
 /*
  * Returns the minimum that is _not_ zero, unless both are zero.
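
After the rename the setter reads as the new kernel-doc describes it: it records the smallest unit the device can address, with 512 remaining the safe default. A minimal usage sketch for a drive with native 4KiB sectors; the probe fragment is hypothetical:

	#include <linux/blkdev.h>

	/* Hypothetical probe fragment for a native 4KiB-sector device. */
	static void example_setup_4k(struct request_queue *q)
	{
		/* was: blk_queue_hardsect_size(q, 4096); */
		blk_queue_logical_block_size(q, 4096);
	}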
@@ -322,7 +323,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
        t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
        t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
        t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
-       t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
+       t->logical_block_size = max(t->logical_block_size, b->logical_block_size);
        if (!t->queue_lock)
                WARN_ON_ONCE(1);
        else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) {
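
Note that logical_block_size is the one limit stacked with max() rather than min_not_zero(): a stacked device must adopt the coarsest granularity of its components, or the finer-grained side would issue I/O the other cannot address. A standalone userspace sketch (not kernel code) of the two combining rules, with hypothetical example values:

	#include <stdio.h>

	/* Mirrors the kernel's min_not_zero(): the minimum that is _not_
	 * zero, unless both are zero (per the comment in the hunk above). */
	static unsigned int min_not_zero(unsigned int a, unsigned int b)
	{
		if (a == 0)
			return b;
		if (b == 0)
			return a;
		return a < b ? a : b;
	}

	static unsigned int max_u(unsigned int a, unsigned int b)
	{
		return a > b ? a : b;
	}

	int main(void)
	{
		/* Hypothetical stack: a DM device (512-byte default) on top
		 * of a native 4KiB-sector disk. */
		unsigned int top_lbs = 512, bottom_lbs = 4096;
		unsigned int top_seg = 65536, bottom_seg = 0;	/* 0 = unset */

		/* max(): the stack honors the coarser addressing granularity. */
		printf("logical_block_size = %u\n", max_u(top_lbs, bottom_lbs));

		/* min_not_zero(): an unset (zero) limit never tightens the stack. */
		printf("max_segment_size   = %u\n", min_not_zero(top_seg, bottom_seg));
		return 0;
	}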