writeback, blkcg: associate each blkcg_gq with the corresponding bdi_writeback_congested
author Tejun Heo <tj@kernel.org>
Fri, 22 May 2015 21:13:38 +0000 (17:13 -0400)
committer Jens Axboe <axboe@fb.com>
Tue, 2 Jun 2015 14:33:35 +0000 (08:33 -0600)
A blkg (blkcg_gq) can be congested and decongested independently of
other blkgs on the same request_queue.  Accordingly, for cgroup
writeback support, the congestion status at the bdi (backing_dev_info)
should be split out and updated separately for each matching blkg.

This patch prepares for that by adding blkg->wb_congested and
associating each blkg with its matching per-blkcg
bdi_writeback_congested on creation.

v2: Updated to associate bdi_writeback_congested instead of
    bdi_writeback.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
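
As a rough illustration of what this association enables (not part of this
patch): once each blkg pins its per-(blkcg, bdi) bdi_writeback_congested,
later changes can flip congestion bits on that object instead of on the bdi
as a whole.  The helper below is a hypothetical sketch only; it assumes the
WB_{async,sync}_congested bits and the ->state word of struct
bdi_writeback_congested introduced earlier in this series, and is not an
existing kernel function.

#include <linux/backing-dev.h>
#include <linux/blk-cgroup.h>

/*
 * Hypothetical sketch only -- not part of this patch and not an existing
 * kernel helper.  Shows how per-blkg congestion could be reflected on the
 * blkg's own bdi_writeback_congested rather than on shared bdi state.
 */
static void blkg_update_congested(struct blkcg_gq *blkg, bool congested,
				  int sync)
{
	int bit = sync ? WB_sync_congested : WB_async_congested;

	if (congested)
		set_bit(bit, &blkg->wb_congested->state);
	else
		clear_bit(bit, &blkg->wb_congested->state);
}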
block/blk-cgroup.c
include/linux/blk-cgroup.h

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 979cfdbb94e0fce09c2821d76046d81c7075da2c..31610ae0ebff2bcbd6b9d80da9f04e08bcc1b697 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -182,6 +182,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct blkcg_gq *new_blkg)
 {
        struct blkcg_gq *blkg;
+       struct bdi_writeback_congested *wb_congested;
        int i, ret;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
@@ -193,22 +194,30 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                goto err_free_blkg;
        }
 
+       wb_congested = wb_congested_get_create(&q->backing_dev_info,
+                                              blkcg->css.id, GFP_ATOMIC);
+       if (!wb_congested) {
+               ret = -ENOMEM;
+               goto err_put_css;
+       }
+
        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
-                       goto err_put_css;
+                       goto err_put_congested;
                }
        }
        blkg = new_blkg;
+       blkg->wb_congested = wb_congested;
 
        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -EINVAL;
-                       goto err_put_css;
+                       goto err_put_congested;
                }
                blkg_get(blkg->parent);
        }
@@ -245,6 +254,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        blkg_put(blkg);
        return ERR_PTR(ret);
 
+err_put_congested:
+       wb_congested_put(wb_congested);
 err_put_css:
        css_put(&blkcg->css);
 err_free_blkg:
@@ -391,6 +402,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
        if (blkg->parent)
                blkg_put(blkg->parent);
 
+       wb_congested_put(blkg->wb_congested);
+
        blkg_free(blkg);
 }
 EXPORT_SYMBOL_GPL(__blkg_release_rcu);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 3033eb173eb42f7f3b1a1172a69d6ea2b5e8a81e..07a32b813ed897d2610fc4abcc205dc6f5f0f0af 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -99,6 +99,12 @@ struct blkcg_gq {
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;
 
+       /*
+        * Each blkg gets congested separately and the congestion state is
+        * propagated to the matching bdi_writeback_congested.
+        */
+       struct bdi_writeback_congested  *wb_congested;
+
        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;
 