blk_end_request: changing block layer core (take 4)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 3d489915fd22841c65c76083e401ac7ed7b60d77..fb951198c70e7d1cf423f6051c4d79617e3f35ec 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -347,7 +347,6 @@ unsigned blk_ordered_req_seq(struct request *rq)
 void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
        struct request *rq;
-       int uptodate;
 
        if (error && !q->orderr)
                q->orderr = error;
@@ -361,15 +360,11 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
        /*
         * Okay, sequence complete.
         */
-       uptodate = 1;
-       if (q->orderr)
-               uptodate = q->orderr;
-
        q->ordseq = 0;
        rq = q->orig_bar_rq;
 
-       end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
-       end_that_request_last(rq, uptodate);
+       if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
+               BUG();
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -486,9 +481,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
                         * ORDERED_NONE while this request is on it.
                         */
                        blkdev_dequeue_request(rq);
-                       end_that_request_first(rq, -EOPNOTSUPP,
-                                              rq->hard_nr_sectors);
-                       end_that_request_last(rq, -EOPNOTSUPP);
+                       if (__blk_end_request(rq, -EOPNOTSUPP,
+                                             blk_rq_bytes(rq)))
+                               BUG();
                        *rqp = NULL;
                        return 0;
                }
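
A minimal sketch of the conversion pattern applied in the two hunks above, assuming a caller that already holds the queue lock; my_finish_whole_request() is a hypothetical helper, not part of this patch:

    #include <linux/blkdev.h>

    /* hypothetical helper: retire an entire request under the queue lock */
    static void my_finish_whole_request(struct request *rq, int error)
    {
            /*
             * Old interface: two calls, with the error folded into "uptodate":
             *
             *      end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
             *      end_that_request_last(rq, uptodate);
             *
             * New interface: one call taking an errno and a byte count.
             * Passing blk_rq_bytes(rq) completes every remaining byte, so a
             * non-zero return (bytes still pending) can only mean a bug.
             */
            if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
                    BUG();
    }
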
@@ -759,6 +754,30 @@ void blk_queue_dma_alignment(struct request_queue *q, int mask)
 
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
+/**
+ * blk_queue_update_dma_alignment - update dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * Description:
+ *    Update the required memory and length alignment for direct dma
+ *    transactions.  If the requested alignment is larger than the current
+ *    alignment, then the current queue alignment is updated to the new
+ *    value; otherwise it is left alone.  This is designed to allow
+ *    multiple objects (driver, device, transport etc) to set their
+ *    respective alignments without having them interfere.
+ *
+ **/
+void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
+{
+       BUG_ON(mask > PAGE_SIZE);
+
+       if (mask > q->dma_alignment)
+               q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_update_dma_alignment);
+
 /**
  * blk_queue_find_tag - find a request by its tag and queue
  * @q:  The request queue for the device
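
The blk_queue_update_dma_alignment() helper added above is meant to be used along the lines of this minimal sketch; the function name and mask values are made up for illustration:

    #include <linux/blkdev.h>

    static void my_setup_dma_alignment(struct request_queue *q)
    {
            /* the driver itself only needs 4-byte alignment: set it outright */
            blk_queue_dma_alignment(q, 0x3);

            /*
             * The transport needs 32 bytes.  blk_queue_update_dma_alignment()
             * only ever raises the mask, so the stricter requirement (0x1f)
             * is kept no matter which object registers its mask first.
             */
            blk_queue_update_dma_alignment(q, 0x1f);

            /* a later, weaker requirement (8 bytes) leaves the mask at 0x1f */
            blk_queue_update_dma_alignment(q, 0x7);
    }
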
@@ -1621,15 +1640,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
        struct request_queue *q = bdi->unplug_io_data;
 
-       /*
-        * devices don't necessarily have an ->unplug_fn defined
-        */
-       if (q->unplug_fn) {
-               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-                                       q->rq.count[READ] + q->rq.count[WRITE]);
-
-               q->unplug_fn(q);
-       }
+       blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1653,6 +1664,20 @@ static void blk_unplug_timeout(unsigned long data)
        kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+       /*
+        * devices don't necessarily have an ->unplug_fn defined
+        */
+       if (q->unplug_fn) {
+               blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+                                       q->rq.count[READ] + q->rq.count[WRITE]);
+
+               q->unplug_fn(q);
+       }
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q:    The &struct request_queue in question
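
A minimal sketch of a caller of the newly exported blk_unplug(), assuming a stacking driver that wants to kick I/O plugged on a lower device; the helper name is hypothetical:

    #include <linux/blkdev.h>

    /* hypothetical helper in a stacking driver (md/dm style) */
    static void my_kick_lower_device(struct block_device *bdev)
    {
            struct request_queue *q = bdev_get_queue(bdev);

            /*
             * blk_unplug() copes with a missing ->unplug_fn and emits the
             * blktrace unplug event itself, so the caller no longer has to
             * open-code either step.
             */
            if (q)
                    blk_unplug(q);
    }
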
@@ -1856,9 +1881,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
        init_timer(&q->unplug_timer);
 
-       kobject_set_name(&q->kobj, "%s", "queue");
-       q->kobj.ktype = &queue_ktype;
-       kobject_init(&q->kobj);
+       kobject_init(&q->kobj, &queue_ktype);
 
        mutex_init(&q->sysfs_lock);
 
@@ -3685,23 +3708,42 @@ void end_that_request_last(struct request *req, int uptodate)
 EXPORT_SYMBOL(end_that_request_last);
 
 static inline void __end_request(struct request *rq, int uptodate,
-                                unsigned int nr_bytes, int dequeue)
+                                unsigned int nr_bytes)
 {
-       if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-               if (dequeue)
-                       blkdev_dequeue_request(rq);
-               add_disk_randomness(rq->rq_disk);
-               end_that_request_last(rq, uptodate);
-       }
+       int error = 0;
+
+       if (uptodate <= 0)
+               error = uptodate ? uptodate : -EIO;
+
+       __blk_end_request(rq, error, nr_bytes);
 }
 
-static unsigned int rq_byte_size(struct request *rq)
+/**
+ * blk_rq_bytes - Returns bytes left to complete in the entire request
+ **/
+unsigned int blk_rq_bytes(struct request *rq)
 {
        if (blk_fs_request(rq))
                return rq->hard_nr_sectors << 9;
 
        return rq->data_len;
 }
+EXPORT_SYMBOL_GPL(blk_rq_bytes);
+
+/**
+ * blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ **/
+unsigned int blk_rq_cur_bytes(struct request *rq)
+{
+       if (blk_fs_request(rq))
+               return rq->current_nr_sectors << 9;
+
+       if (rq->bio)
+               return rq->bio->bi_size;
+
+       return rq->data_len;
+}
+EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
 
 /**
  * end_queued_request - end all I/O on a queued request
@@ -3716,7 +3758,7 @@ static unsigned int rq_byte_size(struct request *rq)
  **/
 void end_queued_request(struct request *rq, int uptodate)
 {
-       __end_request(rq, uptodate, rq_byte_size(rq), 1);
+       __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_queued_request);
 
@@ -3733,7 +3775,7 @@ EXPORT_SYMBOL(end_queued_request);
  **/
 void end_dequeued_request(struct request *rq, int uptodate)
 {
-       __end_request(rq, uptodate, rq_byte_size(rq), 0);
+       __end_request(rq, uptodate, blk_rq_bytes(rq));
 }
 EXPORT_SYMBOL(end_dequeued_request);
 
@@ -3759,10 +3801,106 @@ EXPORT_SYMBOL(end_dequeued_request);
  **/
 void end_request(struct request *req, int uptodate)
 {
-       __end_request(req, uptodate, req->hard_cur_sectors << 9, 1);
+       __end_request(req, uptodate, req->hard_cur_sectors << 9);
 }
 EXPORT_SYMBOL(end_request);
 
+static void complete_request(struct request *rq, int error)
+{
+       /*
+        * REMOVEME: This conversion is transitional and will be removed
+        *           when old end_that_request_* are unexported.
+        */
+       int uptodate = 1;
+       if (error)
+               uptodate = (error == -EIO) ? 0 : error;
+
+       if (blk_rq_tagged(rq))
+               blk_queue_end_tag(rq->q, rq);
+
+       if (blk_queued_rq(rq))
+               blkdev_dequeue_request(rq);
+
+       end_that_request_last(rq, uptodate);
+}
+
+/**
+ * blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @rq.
+ *     If @rq has leftover, sets it up for the next range of segments.
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+       struct request_queue *q = rq->q;
+       unsigned long flags = 0UL;
+       /*
+        * REMOVEME: This conversion is transitional and will be removed
+        *           when old end_that_request_* are unexported.
+        */
+       int uptodate = 1;
+       if (error)
+               uptodate = (error == -EIO) ? 0 : error;
+
+       if (blk_fs_request(rq) || blk_pc_request(rq)) {
+               if (__end_that_request_first(rq, uptodate, nr_bytes))
+                       return 1;
+       }
+
+       add_disk_randomness(rq->rq_disk);
+
+       spin_lock_irqsave(q->queue_lock, flags);
+       complete_request(rq, error);
+       spin_unlock_irqrestore(q->queue_lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(blk_end_request);
+
+/**
+ * __blk_end_request - Helper function for drivers to complete the request.
+ * @rq:       the request being processed
+ * @error:    0 for success, < 0 for error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Must be called with queue lock held unlike blk_end_request().
+ *
+ * Return:
+ *     0 - we are done with this request
+ *     1 - still buffers pending for this request
+ **/
+int __blk_end_request(struct request *rq, int error, int nr_bytes)
+{
+       /*
+        * REMOVEME: This conversion is transitional and will be removed
+        *           when old end_that_request_* are unexported.
+        */
+       int uptodate = 1;
+       if (error)
+               uptodate = (error == -EIO) ? 0 : error;
+
+       if (blk_fs_request(rq) || blk_pc_request(rq)) {
+               if (__end_that_request_first(rq, uptodate, nr_bytes))
+                       return 1;
+       }
+
+       add_disk_randomness(rq->rq_disk);
+
+       complete_request(rq, error);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(__blk_end_request);
+
 static void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                            struct bio *bio)
 {
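
A minimal sketch of the locking difference between the two new completion helpers, assuming a hypothetical driver with an interrupt handler (no queue lock held) and a request_fn path (queue lock held); all driver-side names are made up:

    #include <linux/blkdev.h>
    #include <linux/interrupt.h>

    /* irq context: ->queue_lock is NOT held, so use blk_end_request() */
    static irqreturn_t my_driver_isr(int irq, void *dev_id)
    {
            struct request *rq = dev_id;    /* hypothetical: rq stashed at submit time */

            /* blk_end_request() takes q->queue_lock itself for the final completion */
            blk_end_request(rq, 0, blk_rq_bytes(rq));
            return IRQ_HANDLED;
    }

    /* request_fn context: ->queue_lock is already held, so use __blk_end_request() */
    static void my_driver_request_fn(struct request_queue *q)
    {
            struct request *rq;

            while ((rq = elv_next_request(q)) != NULL) {
                    blkdev_dequeue_request(rq);
                    /* hypothetical: fail requests this driver cannot handle */
                    __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
            }
    }
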
@@ -4074,23 +4212,7 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
-static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
-{
-       return queue_var_show(q->max_phys_segments, page);
-}
-
-static ssize_t queue_max_segments_store(struct request_queue *q,
-                                       const char *page, size_t count)
-{
-       unsigned long segments;
-       ssize_t ret = queue_var_store(&segments, page, count);
-
-       spin_lock_irq(q->queue_lock);
-       q->max_phys_segments = segments;
-       spin_unlock_irq(q->queue_lock);
 
-       return ret;
-}
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
@@ -4114,12 +4236,6 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .show = queue_max_hw_sectors_show,
 };
 
-static struct queue_sysfs_entry queue_max_segments_entry = {
-       .attr = {.name = "max_segments", .mode = S_IRUGO | S_IWUSR },
-       .show = queue_max_segments_show,
-       .store = queue_max_segments_store,
-};
-
 static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
@@ -4131,7 +4247,6 @@ static struct attribute *default_attrs[] = {
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
-       &queue_max_segments_entry.attr,
        &queue_iosched_entry.attr,
        NULL,
 };
@@ -4199,9 +4314,8 @@ int blk_register_queue(struct gendisk *disk)
        if (!q || !q->request_fn)
                return -ENXIO;
 
-       q->kobj.parent = kobject_get(&disk->kobj);
-
-       ret = kobject_add(&q->kobj);
+       ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
+                         "%s", "queue");
        if (ret < 0)
                return ret;
 
@@ -4226,6 +4340,6 @@ void blk_unregister_queue(struct gendisk *disk)
 
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
-               kobject_put(&disk->kobj);
+               kobject_put(&disk->dev.kobj);
        }
 }
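
The kobject changes in blk_alloc_queue_node() and blk_register_queue() above follow the reworked kobject API; a minimal sketch of that pattern, with my_ktype and my_register() as hypothetical stand-ins:

    #include <linux/kobject.h>

    /* hypothetical ktype with release/sysfs_ops defined elsewhere */
    extern struct kobj_type my_ktype;

    static int my_register(struct kobject *kobj, struct kobject *parent)
    {
            /*
             * Old pattern:
             *      kobject_set_name(kobj, "%s", "queue");
             *      kobj->ktype = &my_ktype;
             *      kobject_init(kobj);
             *      ...
             *      kobject_add(kobj);
             *
             * New pattern: the ktype goes to kobject_init(), the parent and
             * name go to kobject_add().
             */
            kobject_init(kobj, &my_ktype);
            return kobject_add(kobj, parent, "%s", "queue");
    }
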