block: fix plug list flushing for nomerge queues
author Jeff Moyer <jmoyer@redhat.com>
Tue, 20 Oct 2015 15:13:51 +0000 (23:13 +0800)
committer Jens Axboe <axboe@fb.com>
Wed, 21 Oct 2015 21:00:48 +0000 (15:00 -0600)
Request queues with merging disabled will not flush the plug list after
BLK_MAX_REQUEST_COUNT requests have been queued, since the code relies
on blk_attempt_plug_merge to compute the request_count.  Fix this by
computing the number of queued requests even for nomerge queues.

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
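
For reference, the plugging path only flushes once the accumulated
request_count crosses BLK_MAX_REQUEST_COUNT.  A simplified sketch of that
check (abridged from the blk_queue_bio() plugging code, not part of this
patch) looks roughly like:

	plug = current->plug;
	if (plug) {
		/* Too many plugged requests from this task: flush them now. */
		if (request_count >= BLK_MAX_REQUEST_COUNT)
			blk_flush_plug_list(plug, false);
		list_add_tail(&req->queuelist, &plug->list);
	}

With merging disabled, blk_attempt_plug_merge() is never called, so
request_count stays at zero and the flush above never triggers;
blk_plug_queued_count() restores an accurate count for these queues.
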
block/blk-core.c
block/blk-mq.c
block/blk.h

diff --git a/block/blk-core.c b/block/blk-core.c
index 2eb722d48773cb8a8de49d58b934eed830755da7..f0ae087ead06db4794e5fc399b8080cb83b7a77b 100644
@@ -1594,6 +1594,30 @@ out:
        return ret;
 }
 
+unsigned int blk_plug_queued_count(struct request_queue *q)
+{
+       struct blk_plug *plug;
+       struct request *rq;
+       struct list_head *plug_list;
+       unsigned int ret = 0;
+
+       plug = current->plug;
+       if (!plug)
+               goto out;
+
+       if (q->mq_ops)
+               plug_list = &plug->mq_list;
+       else
+               plug_list = &plug->list;
+
+       list_for_each_entry(rq, plug_list, queuelist) {
+               if (rq->q == q)
+                       ret++;
+       }
+out:
+       return ret;
+}
+
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
        req->cmd_type = REQ_TYPE_FS;
@@ -1641,9 +1665,11 @@ static void blk_queue_bio(struct request_queue *q, struct bio *bio)
         * Check if we can merge with the plugged list before grabbing
         * any locks.
         */
-       if (!blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, NULL))
-               return;
+       if (!blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        spin_lock_irq(q->queue_lock);
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index d921cd5177f542f24fdcc052ce1e53e0c310c758..9683a561efcd73a9e1e38a6909e6edae23c97b9b 100644
@@ -1268,9 +1268,12 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        blk_queue_split(q, &bio, q->bio_split);
 
-       if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq))
-               return;
+       if (!is_flush_fua && !blk_queue_nomerges(q)) {
+               if (blk_attempt_plug_merge(q, bio, &request_count,
+                                          &same_queue_rq))
+                       return;
+       } else
+               request_count = blk_plug_queued_count(q);
 
        rq = blk_mq_map_request(q, bio, &data);
        if (unlikely(!rq))
diff --git a/block/blk.h b/block/blk.h
index 98614ad37c81f22e175d9455967f9f6a98b78676..aa27d0292af19fbd13c0a3c1ef4b336db4d2bc3b 100644
@@ -86,6 +86,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
                            unsigned int *request_count,
                            struct request **same_queue_rq);
+unsigned int blk_plug_queued_count(struct request_queue *q);
 
 void blk_account_io_start(struct request *req, bool new_io);
 void blk_account_io_completion(struct request *req, unsigned int bytes);