Merge remote-tracking branch 'spi/fix/img-spfi' into spi-linus
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2390c5541e71fb09c3353004224d76b4c81bddc8..b7b8933ec24188b2807229af1aa844503b46dc89 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,6 +33,7 @@ static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
+static void blk_mq_run_queues(struct request_queue *q);
 
 /*
  * Check if any of the ctx's have pending work in this hardware queue
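Note on the added forward declaration: later in this patch blk_mq_run_queues() becomes static, but it is still called by blk_mq_freeze_queue_start() (next hunk), which appears earlier in the file than the definition, so a file-local prototype is needed. A generic illustration of the idiom, with made-up names:

static void helper(void);		/* forward declaration */

void caller(void)
{
	helper();			/* used before its definition */
}

static void helper(void)
{
	/* definition appears later in the same file */
}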
@@ -117,7 +118,7 @@ void blk_mq_freeze_queue_start(struct request_queue *q)
 
        if (freeze) {
                percpu_ref_kill(&q->mq_usage_counter);
-               blk_mq_run_queues(q, false);
+               blk_mq_run_queues(q);
        }
 }
 EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
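The hunk above is the freeze-start path: the queue's usage counter is killed so new submissions stop taking references, and every hardware queue is then run once so requests that were already queued get dispatched and drained. A minimal userspace sketch of that pattern, with invented names (not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

struct mini_queue {
	atomic_bool dying;		/* plays the role of the killed percpu ref */
	atomic_int in_flight;		/* requests that still have to complete */
};

bool mini_queue_enter(struct mini_queue *q)
{
	if (atomic_load(&q->dying))
		return false;		/* new submissions are refused */
	atomic_fetch_add(&q->in_flight, 1);
	return true;
}

void mini_queue_exit(struct mini_queue *q)
{
	atomic_fetch_sub(&q->in_flight, 1);
}

void mini_freeze_start(struct mini_queue *q)
{
	atomic_store(&q->dying, true);	/* like percpu_ref_kill() */
	/* the real code now runs each hw queue so queued work drains */
}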
@@ -136,6 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
        blk_mq_freeze_queue_start(q);
        blk_mq_freeze_queue_wait(q);
 }
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
 
 void blk_mq_unfreeze_queue(struct request_queue *q)
 {
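blk_mq_freeze_queue() (freeze start plus wait, per the context above) is now exported to GPL modules, so a modular block driver can quiesce a queue around a state change. A hypothetical usage sketch, with mydrv_* names invented here and assuming the matching blk_mq_unfreeze_queue() is likewise available to modules:

static void mydrv_reconfigure(struct mydrv_dev *dev)
{
	blk_mq_freeze_queue(dev->queue);	/* block new requests, wait for in-flight ones */
	mydrv_apply_settings(dev);		/* hypothetical helper */
	blk_mq_unfreeze_queue(dev->queue);	/* resume normal operation */
}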
@@ -902,7 +904,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
                        &hctx->run_work, 0);
 }
 
-void blk_mq_run_queues(struct request_queue *q, bool async)
+static void blk_mq_run_queues(struct request_queue *q)
 {
        struct blk_mq_hw_ctx *hctx;
        int i;
@@ -913,10 +915,9 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
                    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                        continue;
 
-               blk_mq_run_hw_queue(hctx, async);
+               blk_mq_run_hw_queue(hctx, false);
        }
 }
-EXPORT_SYMBOL(blk_mq_run_queues);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
@@ -954,7 +955,6 @@ void blk_mq_start_hw_queues(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queues);
 
-
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
 {
        struct blk_mq_hw_ctx *hctx;
@@ -1423,7 +1423,8 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set,
        size_t rq_size, left;
 
        tags = blk_mq_init_tags(set->queue_depth, set->reserved_tags,
-                               set->numa_node);
+                               set->numa_node,
+                               BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags));
        if (!tags)
                return NULL;
 
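The extra argument passes a tag allocation policy down to blk_mq_init_tags(); BLK_MQ_FLAG_TO_ALLOC_POLICY() extracts that policy from bits the driver encoded in set->flags. A generic sketch of the flag-packing idiom, with made-up names and bit positions (the real macro lives in the blk-mq headers):

#define EX_POLICY_SHIFT		4
#define EX_POLICY_BITS		2
#define EX_POLICY_MASK		((1u << EX_POLICY_BITS) - 1)

#define EX_POLICY_TO_FLAGS(pol)	(((unsigned int)(pol) & EX_POLICY_MASK) << EX_POLICY_SHIFT)
#define EX_FLAGS_TO_POLICY(fl)	(((fl) >> EX_POLICY_SHIFT) & EX_POLICY_MASK)

enum ex_alloc_policy {
	EX_ALLOC_FIFO = 0,		/* hand out the lowest free tag */
	EX_ALLOC_RR   = 1,		/* rotate through the tag space */
};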
@@ -1937,7 +1938,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
         */
        if (percpu_ref_init(&q->mq_usage_counter, blk_mq_usage_counter_release,
                            PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
-               goto err_map;
+               goto err_mq_usage;
 
        setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
        blk_queue_rq_timeout(q, 30000);
@@ -1980,7 +1981,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
        if (blk_mq_init_hw_queues(q, set))
-               goto err_hw;
+               goto err_mq_usage;
 
        mutex_lock(&all_q_mutex);
        list_add_tail(&q->all_q_node, &all_q_list);
@@ -1992,7 +1993,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
        return q;
 
-err_hw:
+err_mq_usage:
        blk_cleanup_queue(q);
 err_hctxs:
        kfree(map);
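The last three hunks adjust the goto-based error unwinding in blk_mq_init_queue(): both the percpu_ref_init() failure and the blk_mq_init_hw_queues() failure now jump to the renamed err_mq_usage label, which tears the queue down via blk_cleanup_queue() before falling through to the remaining cleanup. A generic sketch of the idiom, with invented names (not the blk-mq code itself):

#include <stdlib.h>

struct ctx {
	void *map;
	void *queue;
};

int ctx_init(struct ctx *c)
{
	c->map = malloc(64);
	if (!c->map)
		goto err;

	c->queue = malloc(128);
	if (!c->queue)
		goto err_map;		/* undo only what exists so far */

	return 0;

err_map:
	free(c->map);			/* labels run in reverse setup order */
err:
	return -1;
}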