cgroup: drop unnecessary RCU dancing from __put_css_set()
author     Tejun Heo <tj@kernel.org>
           Thu, 13 Jun 2013 04:04:54 +0000 (21:04 -0700)
committer  Tejun Heo <tj@kernel.org>
           Thu, 13 Jun 2013 17:55:18 +0000 (10:55 -0700)
__put_css_set() does RCU read access on @cgrp across dropping
@cgrp->count so that it can continue accessing @cgrp even if the count
reached zero and destruction of the cgroup commenced.  Given that both
sides - __put_css_set() and cgroup_destroy_locked() - are cold paths, this
is unnecessary.  Just making cgroup_destroy_locked() grab css_set_lock
while checking @cgrp->count is enough.

Remove the RCU read locking from __put_css_set() and make
cgroup_destroy_locked() read-lock css_set_lock when checking
@cgrp->count.  This will also allow removing @cgrp->count.
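
For illustration only, a minimal userspace sketch of the resulting pairing,
with a pthread rwlock standing in for css_set_lock; the fake_* names and the
simplified destroy check are made up for this sketch and are not kernel code:

  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdbool.h>

  struct fake_cgrp {
          atomic_int count;          /* stand-in for cgrp->count */
          bool has_children;         /* stand-in for !list_empty(&cgrp->children) */
  };

  static pthread_rwlock_t fake_css_set_lock = PTHREAD_RWLOCK_INITIALIZER;

  /* analogue of the __put_css_set() side: drop the ref under the lock */
  static void fake_put(struct fake_cgrp *cgrp)
  {
          pthread_rwlock_wrlock(&fake_css_set_lock);
          /* @cgrp can't be destroyed here: fake_destroy() waits for the lock */
          atomic_fetch_sub(&cgrp->count, 1);
          pthread_rwlock_unlock(&fake_css_set_lock);
  }

  /* analogue of the cgroup_destroy_locked() side: sample state under the lock */
  static int fake_destroy(struct fake_cgrp *cgrp)
  {
          bool empty;

          pthread_rwlock_rdlock(&fake_css_set_lock);
          empty = !atomic_load(&cgrp->count) && !cgrp->has_children;
          pthread_rwlock_unlock(&fake_css_set_lock);

          return empty ? 0 : -1;     /* -EBUSY in the real code */
  }

  int main(void)
  {
          struct fake_cgrp cgrp = { .count = 1, .has_children = false };

          fake_put(&cgrp);
          return fake_destroy(&cgrp);     /* 0: safe to destroy */
  }

Because the put side drops the reference while holding the lock for writing,
the destroy side can never sample a zero count while a put is still part-way
through using the cgroup, which is the guarantee rcu_read_lock() used to
provide.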

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 84efb344fdf63b3389706998afdf4895dbb2a222..1a68241ca835bc601954134c583e3b27337d214d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -407,19 +407,13 @@ static void __put_css_set(struct css_set *cset, int taskexit)
                list_del(&link->cset_link);
                list_del(&link->cgrp_link);
 
-               /*
-                * We may not be holding cgroup_mutex, and if cgrp->count is
-                * dropped to 0 the cgroup can be destroyed at any time, hence
-                * rcu_read_lock is used to keep it alive.
-                */
-               rcu_read_lock();
+               /* @cgrp can't go away while we're holding css_set_lock */
                if (atomic_dec_and_test(&cgrp->count) &&
                    notify_on_release(cgrp)) {
                        if (taskexit)
                                set_bit(CGRP_RELEASABLE, &cgrp->flags);
                        check_for_release(cgrp);
                }
-               rcu_read_unlock();
 
                kfree(link);
        }
@@ -4370,11 +4364,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        struct cgroup *parent = cgrp->parent;
        struct cgroup_event *event, *tmp;
        struct cgroup_subsys *ss;
+       bool empty;
 
        lockdep_assert_held(&d->d_inode->i_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
-       if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
+       /*
+        * css_set_lock prevents @cgrp from being removed while
+        * __put_css_set() is in progress.
+        */
+       read_lock(&css_set_lock);
+       empty = !atomic_read(&cgrp->count) && list_empty(&cgrp->children);
+       read_unlock(&css_set_lock);
+       if (!empty)
                return -EBUSY;
 
        /*
@@ -5051,8 +5053,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
 
 static void check_for_release(struct cgroup *cgrp)
 {
-       /* All of these checks rely on RCU to keep the cgroup
-        * structure alive */
        if (cgroup_is_releasable(cgrp) &&
            !atomic_read(&cgrp->count) && list_empty(&cgrp->children)) {
                /*