blkcg: let blkio_group point to blkio_cgroup directly
author    Tejun Heo <tj@kernel.org>
Mon, 5 Mar 2012 21:15:11 +0000 (13:15 -0800)
committer Jens Axboe <axboe@kernel.dk>
Tue, 6 Mar 2012 20:27:23 +0000 (21:27 +0100)
Currently, blkg points to the associated blkcg via its css_id.  This
unnecessarily complicates dereferencing blkcg.  Let blkg hold a
reference to the associated blkcg, point to it directly, and disable
css_id on blkio_subsys.
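
In other words, instead of stashing a css_id and resolving it with
css_lookup() on every use, the blkg takes a css reference up front and
stores the pointer.  A standalone userspace toy of that ownership model
(illustrative names only; a hand-rolled refcount and malloc/free stand in
for css_get()/css_put() and the real allocation paths):

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy model (not kernel code) of the ownership this patch sets up: each
 * blkg takes a reference on its blkcg and keeps a direct pointer, so the
 * blkcg cannot go away while any of its blkgs still exists.
 */
struct blkcg {
        int refcnt;
};

struct blkg {
        struct blkcg *blkcg;            /* direct pointer, reference held */
};

static void blkcg_get(struct blkcg *cg)
{
        cg->refcnt++;
}

static void blkcg_put(struct blkcg *cg)
{
        if (--cg->refcnt == 0)
                free(cg);
}

static struct blkg *blkg_create(struct blkcg *cg)
{
        struct blkg *g = malloc(sizeof(*g));

        blkcg_get(cg);                  /* the blkg pins the blkcg... */
        g->blkcg = cg;                  /* ...and dereferences it directly */
        return g;
}

static void blkg_destroy(struct blkg *g)
{
        blkcg_put(g->blkcg);            /* drop the reference on teardown */
        free(g);
}

int main(void)
{
        struct blkcg *cg = calloc(1, sizeof(*cg));
        struct blkg *g;

        cg->refcnt = 1;                 /* reference held by the cgroup core */
        g = blkg_create(cg);
        printf("refcnt with one blkg: %d\n", cg->refcnt);   /* prints 2 */
        blkg_destroy(g);
        blkcg_put(cg);                  /* "cgroup removal" drops the last ref */
        return 0;
}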

This change requires splitting blkiocg_destroy() into
blkiocg_pre_destroy() and blkiocg_destroy() so that all blkg's can be
destroyed and all the blkcg references held by them dropped during
cgroup removal.
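
Roughly why the split is needed, as a standalone userspace toy
(illustrative only; the real callbacks are blkiocg_pre_destroy() and
blkiocg_destroy() in the diff below, and the synchronization the cgroup
core provides between the two phases is glossed over here):

#include <assert.h>
#include <stdlib.h>

/*
 * Toy model of the two-phase teardown: a pre_destroy-style step drains the
 * blkgs first, each one dropping the blkcg reference it held, and only then
 * does a destroy-style step release the blkcg itself.  Bodies are reduced
 * to refcount bookkeeping.
 */
struct blkcg {
        int refcnt;
        int nr_blkgs;
};

static void blkcg_put(struct blkcg *cg)
{
        if (--cg->refcnt == 0)
                free(cg);
}

static int pre_destroy(struct blkcg *cg)        /* ->pre_destroy analogue */
{
        while (cg->nr_blkgs) {                  /* destroy every blkg */
                cg->nr_blkgs--;
                blkcg_put(cg);                  /* ref that blkg was holding */
        }
        return 0;                               /* let cgroup removal continue */
}

static void destroy(struct blkcg *cg)           /* ->destroy analogue */
{
        assert(cg->nr_blkgs == 0);              /* pre_destroy already ran */
        blkcg_put(cg);                          /* final put frees the blkcg */
}

int main(void)
{
        struct blkcg *cg = calloc(1, sizeof(*cg));

        cg->nr_blkgs = 3;
        cg->refcnt = 1 + cg->nr_blkgs;          /* core ref + one per blkg */
        pre_destroy(cg);
        destroy(cg);
        return 0;
}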

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-throttle.c
block/cfq-iosched.c

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 76942360872b98801bcbfbaf693e5e08ac2cd08f..d42d826ece39497f61ef7b1d189943662fc33ddb 100644
@@ -37,6 +37,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct cgroup_taskset *);
 static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup_taskset *);
+static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -51,10 +52,10 @@ struct cgroup_subsys blkio_subsys = {
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
+       .pre_destroy = blkiocg_pre_destroy,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
        .subsys_id = blkio_subsys_id,
-       .use_id = 1,
        .module = THIS_MODULE,
 };
 EXPORT_SYMBOL_GPL(blkio_subsys);
@@ -442,6 +443,7 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
        if (blkg)
                return blkg;
 
+       /* blkg holds a reference to blkcg */
        if (!css_tryget(&blkcg->css))
                return ERR_PTR(-EINVAL);
 
@@ -463,15 +465,16 @@ struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
 
                spin_lock_init(&new_blkg->stats_lock);
                rcu_assign_pointer(new_blkg->q, q);
-               new_blkg->blkcg_id = css_id(&blkcg->css);
+               new_blkg->blkcg = blkcg;
                new_blkg->plid = plid;
                cgroup_path(blkcg->css.cgroup, new_blkg->path,
                            sizeof(new_blkg->path));
+       } else {
+               css_put(&blkcg->css);
        }
 
        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
-       css_put(&blkcg->css);
 
        /* did bypass get turned on inbetween? */
        if (unlikely(blk_queue_bypass(q)) && !for_root) {
@@ -500,6 +503,7 @@ out:
        if (new_blkg) {
                free_percpu(new_blkg->stats_cpu);
                kfree(new_blkg);
+               css_put(&blkcg->css);
        }
        return blkg;
 }
@@ -508,7 +512,6 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
        hlist_del_init_rcu(&blkg->blkcg_node);
-       blkg->blkcg_id = 0;
 }
 
 /*
@@ -517,24 +520,17 @@ static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
  */
 int blkiocg_del_blkio_group(struct blkio_group *blkg)
 {
-       struct blkio_cgroup *blkcg;
+       struct blkio_cgroup *blkcg = blkg->blkcg;
        unsigned long flags;
-       struct cgroup_subsys_state *css;
        int ret = 1;
 
-       rcu_read_lock();
-       css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-       if (css) {
-               blkcg = container_of(css, struct blkio_cgroup, css);
-               spin_lock_irqsave(&blkcg->lock, flags);
-               if (!hlist_unhashed(&blkg->blkcg_node)) {
-                       __blkiocg_del_blkio_group(blkg);
-                       ret = 0;
-               }
-               spin_unlock_irqrestore(&blkcg->lock, flags);
+       spin_lock_irqsave(&blkcg->lock, flags);
+       if (!hlist_unhashed(&blkg->blkcg_node)) {
+               __blkiocg_del_blkio_group(blkg);
+               ret = 0;
        }
+       spin_unlock_irqrestore(&blkcg->lock, flags);
 
-       rcu_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
@@ -1387,7 +1383,8 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
                                ARRAY_SIZE(blkio_files));
 }
 
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
+                              struct cgroup *cgroup)
 {
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
@@ -1396,6 +1393,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
        struct blkio_policy_type *blkiop;
 
        rcu_read_lock();
+
        do {
                spin_lock_irqsave(&blkcg->lock, flags);
 
@@ -1425,8 +1423,15 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
                spin_unlock(&blkio_list_lock);
        } while (1);
 
-       free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
+
+       return 0;
+}
+
+static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+{
+       struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
+
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
 }
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 7ebecf6ea8f10e8048c82e5a11a4d7d186fa7271..ca1fc637bd6ec53d07e803ac7cf8d85cf6541041 100644
@@ -163,7 +163,7 @@ struct blkio_group {
        /* Pointer to the associated request_queue, RCU protected */
        struct request_queue __rcu *q;
        struct hlist_node blkcg_node;
-       unsigned short blkcg_id;
+       struct blkio_cgroup *blkcg;
        /* Store cgroup path */
        char path[128];
        /* policy which owns this blk group */
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 52a429397d3b11254964353e12819c52d0c2002f..fe6a442b84829704acbd099666a69ac54435bb2d 100644
@@ -169,6 +169,9 @@ static void throtl_put_tg(struct throtl_grp *tg)
        if (!atomic_dec_and_test(&tg->ref))
                return;
 
+       /* release the extra blkcg reference this blkg has been holding */
+       css_put(&tg->blkg.blkcg->css);
+
        /*
         * A group is freed in rcu manner. But having an rcu lock does not
         * mean that one can access all the fields of blkg and assume these
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f67d109eb9744762fd6768d3809fbcc339647a6d..9ef86fbfc9ae5d9b23c3ee7941ac36db532a1c1a 100644
@@ -1133,6 +1133,10 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
        cfqg->ref--;
        if (cfqg->ref)
                return;
+
+       /* release the extra blkcg reference this blkg has been holding */
+       css_put(&cfqg->blkg.blkcg->css);
+
        for_each_cfqg_st(cfqg, i, j, st)
                BUG_ON(!RB_EMPTY_ROOT(&st->rb));
        free_percpu(cfqg->blkg.stats_cpu);