block: add missing blk_queue_dead() checks
diff --git a/block/blk-core.c b/block/blk-core.c
index f43c8a5840ae488131dbf395d5eaf96e2fea876d..30add45a87efb1fdcfe066f22a6f48ab4790c1ed 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
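The hunks below fall into three groups: the drain loop in blk_drain_queue() is rewritten to check every place a request can hide, the open-coded QUEUE_FLAG_DEAD tests are converted to blk_queue_dead() and that check is added to the insertion paths that were missing it, and blk_insert_request() is removed outright. For reference, blk_queue_dead() is presumably the thin blkdev.h wrapper over the flag test being replaced, along the lines of:

	#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
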
@@ -358,7 +358,8 @@ EXPORT_SYMBOL(blk_put_queue);
 void blk_drain_queue(struct request_queue *q, bool drain_all)
 {
        while (true) {
-               int nr_rqs;
+               bool drain = false;
+               int i;
 
                spin_lock_irq(q->queue_lock);
 
@@ -368,14 +369,25 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 
                __blk_run_queue(q);
 
-               if (drain_all)
-                       nr_rqs = q->rq.count[0] + q->rq.count[1];
-               else
-                       nr_rqs = q->rq.elvpriv;
+               drain |= q->rq.elvpriv;
+
+               /*
+                * Unfortunately, requests are queued at and tracked from
+                * multiple places and there's no single counter which can
+                * be drained.  Check all the queues and counters.
+                */
+               if (drain_all) {
+                       drain |= !list_empty(&q->queue_head);
+                       for (i = 0; i < 2; i++) {
+                               drain |= q->rq.count[i];
+                               drain |= q->in_flight[i];
+                               drain |= !list_empty(&q->flush_queue[i]);
+                       }
+               }
 
                spin_unlock_irq(q->queue_lock);
 
-               if (!nr_rqs)
+               if (!drain)
                        break;
                msleep(10);
        }
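
The rewritten loop folds every counter and list into one bool, so any pending request, in-flight command, or queued flush keeps the 10ms poll going. For context, the drain is driven from queue teardown; a heavily simplified sketch of the presumed caller (the blk_cleanup_queue() of this era, details assumed, not part of this diff):

	void blk_cleanup_queue(struct request_queue *q)
	{
		/* mark @q dead so no new requests are admitted */
		mutex_lock(&q->sysfs_lock);
		queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
		mutex_unlock(&q->sysfs_lock);

		/* wait for every tracked request and flush to leave @q */
		blk_drain_queue(q, true);

		blk_sync_queue(q);
		blk_put_queue(q);
	}
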
@@ -608,7 +620,7 @@ EXPORT_SYMBOL(blk_init_allocated_queue_node);
 
 int blk_get_queue(struct request_queue *q)
 {
-       if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+       if (likely(!blk_queue_dead(q))) {
                kobject_get(&q->kobj);
                return 0;
        }
@@ -755,7 +767,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
        const bool is_sync = rw_is_sync(rw_flags) != 0;
        int may_queue;
 
-       if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+       if (unlikely(blk_queue_dead(q)))
                return NULL;
 
        may_queue = elv_may_queue(q, rw_flags);
@@ -875,7 +887,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                struct io_context *ioc;
                struct request_list *rl = &q->rq;
 
-               if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+               if (unlikely(blk_queue_dead(q)))
                        return NULL;
 
                prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
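
Both allocators now bail out with NULL when the queue is dead, pushing the failure to the submission path. The presumed consumer in blk_queue_bio(), sketched assuming the error handling of this era:

	req = get_request_wait(q, rw_flags, bio);
	if (unlikely(!req)) {
		bio_endio(bio, -ENODEV);	/* @q is dead, fail the bio */
		goto out_unlock;
	}
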
@@ -1010,54 +1022,6 @@ static void add_acct_request(struct request_queue *q, struct request *rq,
        __elv_add_request(q, rq, where);
 }
 
-/**
- * blk_insert_request - insert a special request into a request queue
- * @q:         request queue where request should be inserted
- * @rq:                request to be inserted
- * @at_head:   insert request at head or tail of queue
- * @data:      private data
- *
- * Description:
- *    Many block devices need to execute commands asynchronously, so they don't
- *    block the whole kernel from preemption during request execution.  This is
- *    accomplished normally by inserting aritficial requests tagged as
- *    REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them
- *    be scheduled for actual execution by the request queue.
- *
- *    We have the option of inserting the head or the tail of the queue.
- *    Typically we use the tail for new ioctls and so forth.  We use the head
- *    of the queue for things like a QUEUE_FULL message from a device, or a
- *    host that is unable to accept a particular command.
- */
-void blk_insert_request(struct request_queue *q, struct request *rq,
-                       int at_head, void *data)
-{
-       int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
-       unsigned long flags;
-
-       /*
-        * tell I/O scheduler that this isn't a regular read/write (ie it
-        * must not attempt merges on this) and that it acts as a soft
-        * barrier
-        */
-       rq->cmd_type = REQ_TYPE_SPECIAL;
-
-       rq->special = data;
-
-       spin_lock_irqsave(q->queue_lock, flags);
-
-       /*
-        * If command is tagged, release the tag
-        */
-       if (blk_rq_tagged(rq))
-               blk_queue_end_tag(q, rq);
-
-       add_acct_request(q, rq, where);
-       __blk_run_queue(q);
-       spin_unlock_irqrestore(q->queue_lock, flags);
-}
-EXPORT_SYMBOL(blk_insert_request);
-
 static void part_round_stats_single(int cpu, struct hd_struct *part,
                                    unsigned long now)
 {
@@ -1379,15 +1343,19 @@ get_rq:
                 */
                if (list_empty(&plug->list))
                        trace_block_plug(q);
-               else if (!plug->should_sort) {
-                       struct request *__rq;
+               else {
+                       if (!plug->should_sort) {
+                               struct request *__rq;
 
-                       __rq = list_entry_rq(plug->list.prev);
-                       if (__rq->q != q)
-                               plug->should_sort = 1;
+                               __rq = list_entry_rq(plug->list.prev);
+                               if (__rq->q != q)
+                                       plug->should_sort = 1;
+                       }
+                       if (request_count >= BLK_MAX_REQUEST_COUNT) {
+                               blk_flush_plug_list(plug, false);
+                               trace_block_plug(q);
+                       }
                }
-               if (request_count >= BLK_MAX_REQUEST_COUNT)
-                       blk_flush_plug_list(plug, false);
                list_add_tail(&req->queuelist, &plug->list);
                drive_stat_acct(req, 1);
        } else {
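
The restructured branch checks BLK_MAX_REQUEST_COUNT only when the plug list is non-empty and re-emits the plug trace after an implicit flush, so the next batch is attributed to a fresh plug event. For context, a sketch of the on-stack plugging pattern this path serves (standard usage, assumed rather than taken from this diff):

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* submitted requests collect on plug.list until the plug is
	 * flushed, either here at BLK_MAX_REQUEST_COUNT or at finish */
	submit_batch_of_bios();		/* hypothetical helper */
	blk_finish_plug(&plug);
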
@@ -1763,6 +1731,10 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
                return -EIO;
 
        spin_lock_irqsave(q->queue_lock, flags);
+       if (unlikely(blk_queue_dead(q))) {
+               spin_unlock_irqrestore(q->queue_lock, flags);
+               return -ENODEV;
+       }
 
        /*
         * Submitting request must be dequeued before calling this function
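
The new check runs with queue_lock held, so it observes the dead flag consistently with the rest of the insertion path. A stacking driver calling this (request-based dm is the typical user) would now see the failure; hypothetical handling (names illustrative):

	ret = blk_insert_cloned_request(clone->q, clone);
	if (ret == -ENODEV)
		/* the bottom queue is being torn down; fail upward */
		blk_end_request_all(orig_rq, -ENODEV);
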
@@ -2736,6 +2708,14 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
        trace_block_unplug(q, depth, !from_schedule);
 
+       /*
+        * Don't mess with dead queue.
+        */
+       if (unlikely(blk_queue_dead(q))) {
+               spin_unlock(q->queue_lock);
+               return;
+       }
+
        /*
         * If we are punting this to kblockd, then we can safely drop
         * the queue_lock before waking kblockd (which needs to take
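
The bailout above unlocks explicitly because queue_unplugged() is entered with queue_lock held and is expected to return with it released; the caller-side contract, as assumed here, is roughly:

	spin_lock(q->queue_lock);	/* taken per-queue in the flush loop */
	/* ... plugged requests for @q are requeued ... */
	queue_unplugged(q, depth, from_schedule);	/* returns with the lock dropped */
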
@@ -2812,6 +2792,15 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                        depth = 0;
                        spin_lock(q->queue_lock);
                }
+
+               /*
+                * Short-circuit if @q is dead
+                */
+               if (unlikely(blk_queue_dead(q))) {
+                       __blk_end_request_all(rq, -ENODEV);
+                       continue;
+               }
+
                /*
                 * rq is already accounted, so use raw insert
                 */