unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
+ unsigned int max_discard_sectors;
+ unsigned int discard_granularity;
+ unsigned int discard_alignment;
unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned char misaligned;
+ unsigned char discard_misaligned;
unsigned char no_cluster;
};
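The three new fields describe a device's discard (TRIM/UNMAP) topology: the largest discard a single request may carry, the size of the internal erase granule, and the byte offset of the first granule. Below is a minimal sketch of how a low-level driver could advertise these limits at queue setup time; the function name, the device numbers and the direct limits assignments are illustrative assumptions, while blk_queue_max_discard_sectors() is the helper declared further down in this patch:

	/*
	 * Illustrative only: a hypothetical driver whose device erases in
	 * 256 KiB units and accepts up to 8 MiB per discard request.
	 */
	static void my_driver_set_discard_limits(struct request_queue *q)
	{
		/* 8 MiB expressed in 512-byte sectors */
		blk_queue_max_discard_sectors(q, 8 * 1024 * 2);
		/* erase unit size and offset, both in bytes */
		q->limits.discard_granularity = 256 * 1024;
		q->limits.discard_alignment = 0;
	}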
return bdev->bd_disk->queue;
}
-static inline void blk_run_backing_dev(struct backing_dev_info *bdi,
- struct page *page)
-{
- if (bdi && bdi->unplug_io_fn)
- bdi->unplug_io_fn(bdi, page);
-}
-
-static inline void blk_run_address_space(struct address_space *mapping)
-{
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, NULL);
-}
-
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_max_discard_sectors(struct request_queue *q,
+ unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_alignment_offset(struct request_queue *q,
return q->limits.physical_block_size;
}
+static inline int bdev_physical_block_size(struct block_device *bdev)
+{
+ return queue_physical_block_size(bdev_get_queue(bdev));
+}
+
static inline unsigned int queue_io_min(struct request_queue *q)
{
return q->limits.io_min;
}
+static inline int bdev_io_min(struct block_device *bdev)
+{
+ return queue_io_min(bdev_get_queue(bdev));
+}
+
static inline unsigned int queue_io_opt(struct request_queue *q)
{
return q->limits.io_opt;
}
+static inline int bdev_io_opt(struct block_device *bdev)
+{
+ return queue_io_opt(bdev_get_queue(bdev));
+}
+
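The new bdev_physical_block_size(), bdev_io_min() and bdev_io_opt() wrappers let in-kernel users query the I/O topology directly from a struct block_device instead of fetching the queue first. A hedged sketch of a consumer follows; the function name and the fallback policy are assumptions made for illustration:

	/*
	 * Illustrative only: prefer the reported optimal I/O size (e.g. a
	 * RAID stripe width), falling back to the minimum I/O size (e.g. the
	 * physical block or chunk size) when no optimal size is advertised.
	 */
	static unsigned int my_preferred_io_size(struct block_device *bdev)
	{
		unsigned int opt = bdev_io_opt(bdev);

		return opt ? opt : bdev_io_min(bdev);
	}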
static inline int queue_alignment_offset(struct request_queue *q)
{
- if (q && q->limits.misaligned)
+ if (q->limits.misaligned)
return -1;
- if (q && q->limits.alignment_offset)
- return q->limits.alignment_offset;
-
- return 0;
+ return q->limits.alignment_offset;
}
static inline int queue_sector_alignment_offset(struct request_queue *q,
& (q->limits.io_min - 1);
}
+static inline int bdev_alignment_offset(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q->limits.misaligned)
+ return -1;
+
+ if (bdev != bdev->bd_contains)
+ return bdev->bd_part->alignment_offset;
+
+ return q->limits.alignment_offset;
+}
+
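bdev_alignment_offset() returns -1 for a misaligned device, the partition's own alignment_offset when called on a partition (bdev != bdev->bd_contains), and the queue-wide limit for a whole device. A hedged usage sketch; the function name and the warning policy are illustrative, not part of this patch:

	/* Illustrative only: warn when a device or partition is not naturally aligned. */
	static void my_warn_if_misaligned(struct block_device *bdev, const char *name)
	{
		int offset = bdev_alignment_offset(bdev);

		if (offset == -1)
			printk(KERN_WARNING "%s: device is misaligned\n", name);
		else if (offset)
			printk(KERN_WARNING "%s: starts %d bytes past the natural alignment\n",
			       name, offset);
	}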
+static inline int queue_discard_alignment(struct request_queue *q)
+{
+ if (q->limits.discard_misaligned)
+ return -1;
+
+ return q->limits.discard_alignment;
+}
+
+static inline int queue_sector_discard_alignment(struct request_queue *q,
+ sector_t sector)
+{
+ return ((sector << 9) - q->limits.discard_alignment)
+ & (q->limits.discard_granularity - 1);
+}
+
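queue_sector_discard_alignment() mirrors queue_sector_alignment_offset(): it returns how many bytes a given sector sits past the start of a discard granule, assuming discard_granularity is a power of two so the mask arithmetic is exact. For example, with an assumed discard_granularity of 64 KiB, discard_alignment of 0 and sector 200: (200 << 9) = 102400 bytes, and 102400 & (65536 - 1) = 36864, i.e. the sector lies 36 KiB into a granule.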
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;