/*
* Okay, sequence complete.
*/
- rq = q->orig_bar_rq;
- uptodate = q->orderr ? q->orderr : 1;
+ uptodate = 1;
+ if (q->orderr)
+ uptodate = q->orderr;
q->ordseq = 0;
+ rq = q->orig_bar_rq;
end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
end_that_request_last(rq, uptodate);
rq_init(q, rq);
if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
rq->cmd_flags |= REQ_RW;
- rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+ if (q->ordered & QUEUE_ORDERED_FUA)
+ rq->cmd_flags |= REQ_FUA;
rq->elevator_private = NULL;
rq->elevator_private2 = NULL;
init_request_from_bio(rq, q->orig_bar_rq->bio);
init_timer(&q->unplug_timer);
- snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+ kobject_set_name(&q->kobj, "%s", "queue");
q->kobj.ktype = &queue_ktype;
kobject_init(&q->kobj);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+/*
+ * Check whether this bio extends beyond the end of the device.
+ */
+static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
+{
+ sector_t maxsector;
+
+ if (!nr_sectors)
+ return 0;
+
+ /* Test device or partition size, when known. */
+ maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+ if (maxsector) {
+ sector_t sector = bio->bi_sector;
+
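+ /*
+ * The bounds test is written with a subtraction so it cannot
+ * overflow: it is equivalent to the wrap-prone
+ * sector + nr_sectors > maxsector check.
+ */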
+ if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+ /*
+ * This may well happen - the kernel calls bread()
+ * without checking the size of the device, e.g., when
+ * mounting a device.
+ */
+ handle_bad_sector(bio);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/**
* generic_make_request: hand a buffer to its device driver for I/O
* @bio: The bio describing the location in memory and on the device.
static inline void __generic_make_request(struct bio *bio)
{
struct request_queue *q;
- sector_t maxsector;
sector_t old_sector;
int ret, nr_sectors = bio_sectors(bio);
dev_t old_dev;
might_sleep();
- /* Test device or partition size, when known. */
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
- sector_t sector = bio->bi_sector;
- if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
- /*
- * This may well happen - the kernel calls bread()
- * without checking the size of the device, e.g., when
- * mounting a device.
- */
- handle_bad_sector(bio);
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
/*
* Resolve the mapping until finished. (drivers are
break;
}
- if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+ if (unlikely(nr_sectors > q->max_hw_sectors)) {
printk("bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
blk_partition_remap(bio);
if (old_sector != -1)
- blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+ blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
old_sector);
blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
old_sector = bio->bi_sector;
old_dev = bio->bi_bdev->bd_dev;
- maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
- if (maxsector) {
- sector_t sector = bio->bi_sector;
-
- if (maxsector < nr_sectors ||
- maxsector - nr_sectors < sector) {
- /*
- * This may well happen - partitions are not
- * checked to make sure they are within the size
- * of the whole device.
- */
- handle_bad_sector(bio);
- goto end_io;
- }
- }
+ if (bio_check_eod(bio, nr_sectors))
+ goto end_io;
ret = q->make_request_fn(q, bio);
} while (ret);
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
- * through requeueing. Theh actual completion happens out-of-order,
+ * through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
EXPORT_SYMBOL(end_that_request_last);
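For a sense of the softirq completion contract described above, here is a minimal, hypothetical driver sketch (my_isr() and my_fetch_done() are invented names; blk_queue_softirq_done(), blk_complete_request() and the end_that_request_*() calls are the real API): the hardware interrupt only flags the request, and the registered callback later completes it from the block softirq:

	#include <linux/blkdev.h>
	#include <linux/interrupt.h>

	/* Runs from the block softirq, not from the hardware IRQ. */
	static void my_softirq_done(struct request *rq)
	{
		int uptodate = rq->errors ? -EIO : 1;
		unsigned long flags;

		/* The driver dequeued rq in its request_fn before starting it. */
		if (!end_that_request_chunk(rq, uptodate, rq->hard_nr_sectors << 9)) {
			spin_lock_irqsave(rq->q->queue_lock, flags);
			end_that_request_last(rq, uptodate);
			spin_unlock_irqrestore(rq->q->queue_lock, flags);
		}
	}

	static irqreturn_t my_isr(int irq, void *dev_id)
	{
		struct request *rq = my_fetch_done(dev_id);	/* hypothetical */

		blk_complete_request(rq);	/* defer completion to the softirq */
		return IRQ_HANDLED;
	}

	/* At queue setup time: blk_queue_softirq_done(q, my_softirq_done); */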
-void end_request(struct request *req, int uptodate)
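+/*
+ * Internal helper for the end_*_request() variants below: complete
+ * @nr_bytes of @rq and, once nothing is left, optionally dequeue the
+ * request before running the final completion.
+ */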
+static inline void __end_request(struct request *rq, int uptodate,
+ unsigned int nr_bytes, int dequeue)
{
- if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
- add_disk_randomness(req->rq_disk);
- blkdev_dequeue_request(req);
- end_that_request_last(req, uptodate);
+ if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
+ if (dequeue)
+ blkdev_dequeue_request(rq);
+ add_disk_randomness(rq->rq_disk);
+ end_that_request_last(rq, uptodate);
}
}
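+
+/*
+ * Full remaining byte size of @rq: fs requests are accounted in
+ * sectors, while other request types (e.g. BLOCK_PC) carry a byte
+ * count in ->data_len.
+ */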
+static unsigned int rq_byte_size(struct request *rq)
+{
+ if (blk_fs_request(rq))
+ return rq->hard_nr_sectors << 9;
+
+ return rq->data_len;
+}
+
+/**
+ * end_queued_request - end all I/O on a queued request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends all I/O on a request, and removes it from the block layer queues.
+ * Not suitable for normal IO completion, unless the driver still has
+ * the request attached to the block layer.
+ *
+ **/
+void end_queued_request(struct request *rq, int uptodate)
+{
+ __end_request(rq, uptodate, rq_byte_size(rq), 1);
+}
+EXPORT_SYMBOL(end_queued_request);
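As an illustration of where the new helper fits (a hedged sketch, not part of the patch; my_fail_all_queued() is an invented name), a driver could fail everything still attached to the queue, e.g. when the device disappears:

	/* Caller must hold q->queue_lock, as elv_next_request() requires. */
	static void my_fail_all_queued(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL)
			end_queued_request(rq, 0);	/* dequeues and completes */
	}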
+
+/**
+ * end_dequeued_request - end all I/O on a dequeued request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends all I/O on a request. The request must already have been
+ * dequeued using blkdev_dequeue_request(), as is normally the case
+ * for most drivers.
+ *
+ **/
+void end_dequeued_request(struct request *rq, int uptodate)
+{
+ __end_request(rq, uptodate, rq_byte_size(rq), 0);
+}
+EXPORT_SYMBOL(end_dequeued_request);
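The counterpart covers the common case named in the comment above, where the driver has already taken the request off the queue. A hypothetical sketch (my_submit() is invented, returning 0 on success):

	/* request_fn is entered with q->queue_lock held. */
	static void my_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL) {
			blkdev_dequeue_request(rq);	/* driver owns rq from here */
			if (my_submit(rq))
				end_dequeued_request(rq, 0);
		}
	}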
+
+/**
+ * end_request - end I/O on the current segment of the request
+ * @rq: the request being processed
+ * @uptodate: error value or 0/1 uptodate flag
+ *
+ * Description:
+ * Ends I/O on the current segment of a request. If that is the only
+ * remaining segment, the request is also completed and freed.
+ *
+ * This is a remnant of how older block drivers handled IO completions.
+ * Modern drivers typically end IO on the full request in one go, unless
+ * they have a residual value to account for. For that case this function
+ * isn't really useful, unless the residual just happens to be the
+ * full current segment. In other words, don't use this function in new
+ * code. Either use end_queued_request() or end_dequeued_request() to
+ * complete the whole request, or end_that_request_chunk() (along with
+ * end_that_request_last()) for partial completions.
+ *
+ **/
+void end_request(struct request *req, int uptodate)
+{
+ __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+}
EXPORT_SYMBOL(end_request);
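For contrast, the legacy per-segment style that the comment above warns against looked roughly like this in older drivers (a sketch; my_xfer() stands in for the actual data transfer and returns 1 on success):

	static void old_style_request_fn(struct request_queue *q)
	{
		struct request *rq;

		/* The same rq is returned until its last segment is ended. */
		while ((rq = elv_next_request(q)) != NULL) {
			int ok = my_xfer(rq->buffer, rq->sector,
					 rq->current_nr_sectors);

			/* Ends one segment; frees rq after the final one. */
			end_request(rq, ok);
		}
	}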
static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,