cgroup: reorganize cgroup_create()
[deliverable/linux.git] / kernel / cgroup.c
index 21667f396a1ed86d6f1abc59c2bb397dec45a40d..1d6106c3fb4e22d41b270bc10de15f79450da7e2 100644 (file)
@@ -99,6 +99,12 @@ static DEFINE_MUTEX(cgroup_mutex);
 static DECLARE_RWSEM(css_set_rwsem);
 #endif
 
+/*
+ * Protects cgroup_idr and css_idr so that IDs can be released without
+ * grabbing cgroup_mutex.
+ */
+static DEFINE_SPINLOCK(cgroup_idr_lock);
+
 /*
  * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
  * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
@@ -190,6 +196,37 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
                              bool is_add);
 static void cgroup_pidlist_destroy_all(struct cgroup *cgrp);
 
+/* IDR wrappers which synchronize using cgroup_idr_lock */
+static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
+                           gfp_t gfp_mask)
+{
+       int ret;
+
+       idr_preload(gfp_mask);
+       spin_lock_bh(&cgroup_idr_lock);
+       ret = idr_alloc(idr, ptr, start, end, gfp_mask);
+       spin_unlock_bh(&cgroup_idr_lock);
+       idr_preload_end();
+       return ret;
+}
+
+static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
+{
+       void *ret;
+
+       spin_lock_bh(&cgroup_idr_lock);
+       ret = idr_replace(idr, ptr, id);
+       spin_unlock_bh(&cgroup_idr_lock);
+       return ret;
+}
+
+/*
+ * cgroup_idr_remove - release @id from @idr using only cgroup_idr_lock,
+ * so IDs can be freed from contexts that cannot grab cgroup_mutex
+ * (see the cgroup_idr_lock definition above).
+ */
+static void cgroup_idr_remove(struct idr *idr, int id)
+{
+       spin_lock_bh(&cgroup_idr_lock);
+       idr_remove(idr, id);
+       spin_unlock_bh(&cgroup_idr_lock);
+}
+
 /**
  * cgroup_css - obtain a cgroup's css for the specified subsystem
  * @cgrp: the cgroup of interest
@@ -246,11 +283,10 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp)
        return test_bit(CGRP_DEAD, &cgrp->flags);
 }
 
-struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
 {
-       struct kernfs_open_file *of = seq->private;
        struct cgroup *cgrp = of->kn->parent->priv;
-       struct cftype *cft = seq_cft(seq);
+       struct cftype *cft = of_cft(of);
 
        /*
         * This is open and unprotected implementation of cgroup_css().
@@ -265,7 +301,7 @@ struct cgroup_subsys_state *seq_css(struct seq_file *seq)
        else
                return &cgrp->dummy_css;
 }
-EXPORT_SYMBOL_GPL(seq_css);
+EXPORT_SYMBOL_GPL(of_css);
 
 /**
  * cgroup_is_descendant - test ancestry
@@ -402,7 +438,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
        .refcount               = ATOMIC_INIT(1),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
@@ -998,8 +1034,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
        if (cft->read_u64 || cft->read_s64 || cft->seq_show)
                mode |= S_IRUGO;
 
-       if (cft->write_u64 || cft->write_s64 || cft->write_string ||
-           cft->trigger)
+       if (cft->write_u64 || cft->write_s64 || cft->write)
                mode |= S_IWUSR;
 
        return mode;
@@ -1052,15 +1087,7 @@ static void cgroup_put(struct cgroup *cgrp)
        if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
                return;
 
-       /*
-        * XXX: cgrp->id is only used to look up css's.  As cgroup and
-        * css's lifetimes will be decoupled, it should be made
-        * per-subsystem and moved to css->id so that lookups are
-        * successful until the target css is released.
-        */
-       mutex_lock(&cgroup_mutex);
-       idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
-       mutex_unlock(&cgroup_mutex);
+       cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
        cgrp->id = -1;
 
        call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1531,7 +1558,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
        lockdep_assert_held(&cgroup_tree_mutex);
        lockdep_assert_held(&cgroup_mutex);
 
-       ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+       ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
        if (ret < 0)
                goto out;
        root_cgrp->id = ret;
@@ -1859,7 +1886,7 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
 
 /**
  * cgroup_task_migrate - move a task from one cgroup to another.
- * @old_cgrp; the cgroup @tsk is being migrated from
+ * @old_cgrp: the cgroup @tsk is being migrated from
  * @tsk: the task being migrated
  * @new_cset: the new css_set @tsk is being attached to
  *
@@ -2205,12 +2232,18 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
  * function to attach either it or all tasks in its threadgroup. Will lock
  * cgroup_mutex and threadgroup.
  */
-static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
+static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+                                   size_t nbytes, loff_t off, bool threadgroup)
 {
        struct task_struct *tsk;
        const struct cred *cred = current_cred(), *tcred;
+       struct cgroup *cgrp = of_css(of)->cgroup;
+       pid_t pid;
        int ret;
 
+       if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+               return -EINVAL;
+
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
 
@@ -2278,7 +2311,7 @@ retry_find_task:
        put_task_struct(tsk);
 out_unlock_cgroup:
        mutex_unlock(&cgroup_mutex);
-       return ret;
+       return ret ?: nbytes;
 }
 
 /**
@@ -2312,43 +2345,43 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-static int cgroup_tasks_write(struct cgroup_subsys_state *css,
-                             struct cftype *cft, u64 pid)
+static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
+                                 char *buf, size_t nbytes, loff_t off)
 {
-       return attach_task_by_pid(css->cgroup, pid, false);
+       return __cgroup_procs_write(of, buf, nbytes, off, false);
 }
 
-static int cgroup_procs_write(struct cgroup_subsys_state *css,
-                             struct cftype *cft, u64 tgid)
+static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
+                                 char *buf, size_t nbytes, loff_t off)
 {
-       return attach_task_by_pid(css->cgroup, tgid, true);
+       return __cgroup_procs_write(of, buf, nbytes, off, true);
 }
 
-static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
-                                     struct cftype *cft, char *buffer)
+static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
+                                         char *buf, size_t nbytes, loff_t off)
 {
-       struct cgroup_root *root = css->cgroup->root;
+       struct cgroup *cgrp = of_css(of)->cgroup;
+       struct cgroup_root *root = cgrp->root;
 
        BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX);
-       if (!cgroup_lock_live_group(css->cgroup))
+       if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
        spin_lock(&release_agent_path_lock);
-       strlcpy(root->release_agent_path, buffer,
+       strlcpy(root->release_agent_path, strstrip(buf),
                sizeof(root->release_agent_path));
        spin_unlock(&release_agent_path_lock);
        mutex_unlock(&cgroup_mutex);
-       return 0;
+       return nbytes;
 }
 
 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
 
-       if (!cgroup_lock_live_group(cgrp))
-               return -ENODEV;
+       spin_lock(&release_agent_path_lock);
        seq_puts(seq, cgrp->root->release_agent_path);
+       spin_unlock(&release_agent_path_lock);
        seq_putc(seq, '\n');
-       mutex_unlock(&cgroup_mutex);
        return 0;
 }
 
@@ -2503,31 +2536,34 @@ out_finish:
 }
 
 /* change the enabled child controllers for a cgroup in the default hierarchy */
-static int cgroup_subtree_control_write(struct cgroup_subsys_state *dummy_css,
-                                       struct cftype *cft, char *buffer)
+static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
+                                           char *buf, size_t nbytes,
+                                           loff_t off)
 {
-       unsigned int enable_req = 0, disable_req = 0, enable, disable;
-       struct cgroup *cgrp = dummy_css->cgroup, *child;
+       unsigned int enable = 0, disable = 0;
+       struct cgroup *cgrp = of_css(of)->cgroup, *child;
        struct cgroup_subsys *ss;
-       char *tok, *p;
+       char *tok;
        int ssid, ret;
 
        /*
-        * Parse input - white space separated list of subsystem names
-        * prefixed with either + or -.
+        * Parse input - space separated list of subsystem names prefixed
+        * with either + or -.
         */
-       p = buffer;
-       while ((tok = strsep(&p, " \t\n"))) {
+       buf = strstrip(buf);
+       while ((tok = strsep(&buf, " "))) {
+               if (tok[0] == '\0')
+                       continue;
                for_each_subsys(ss, ssid) {
                        if (ss->disabled || strcmp(tok + 1, ss->name))
                                continue;
 
                        if (*tok == '+') {
-                               enable_req |= 1 << ssid;
-                               disable_req &= ~(1 << ssid);
+                               enable |= 1 << ssid;
+                               disable &= ~(1 << ssid);
                        } else if (*tok == '-') {
-                               disable_req |= 1 << ssid;
-                               enable_req &= ~(1 << ssid);
+                               disable |= 1 << ssid;
+                               enable &= ~(1 << ssid);
                        } else {
                                return -EINVAL;
                        }
@@ -2544,10 +2580,7 @@ static int cgroup_subtree_control_write(struct cgroup_subsys_state *dummy_css,
         * active_ref protection.
         */
        cgroup_get(cgrp);
-       kernfs_break_active_protection(cgrp->control_kn);
-retry:
-       enable = enable_req;
-       disable = disable_req;
+       kernfs_break_active_protection(of->kn);
 
        mutex_lock(&cgroup_tree_mutex);
 
@@ -2565,17 +2598,21 @@ retry:
                         * cases, wait till it's gone using offline_waitq.
                         */
                        cgroup_for_each_live_child(child, cgrp) {
-                               wait_queue_t wait;
+                               DEFINE_WAIT(wait);
 
                                if (!cgroup_css(child, ss))
                                        continue;
 
+                               cgroup_get(child);
                                prepare_to_wait(&child->offline_waitq, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                mutex_unlock(&cgroup_tree_mutex);
                                schedule();
                                finish_wait(&child->offline_waitq, &wait);
-                               goto retry;
+                               cgroup_put(child);
+
+                               ret = restart_syscall();
+                               goto out_unbreak;
                        }
 
                        /* unavailable or not enabled on the parent? */
@@ -2659,9 +2696,10 @@ out_unlock:
        mutex_unlock(&cgroup_mutex);
 out_unlock_tree:
        mutex_unlock(&cgroup_tree_mutex);
-       kernfs_unbreak_active_protection(cgrp->control_kn);
+out_unbreak:
+       kernfs_unbreak_active_protection(of->kn);
        cgroup_put(cgrp);
-       return ret;
+       return ret ?: nbytes;
 
 err_undo_css:
        cgrp->child_subsys_mask &= ~enable;
@@ -2694,6 +2732,9 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
        struct cgroup_subsys_state *css;
        int ret;
 
+       if (cft->write)
+               return cft->write(of, buf, nbytes, off);
+
        /*
         * kernfs guarantees that a file isn't deleted with operations in
         * flight, which means that the matching css is and stays alive and
@@ -2704,9 +2745,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
        css = cgroup_css(cgrp, cft->ss);
        rcu_read_unlock();
 
-       if (cft->write_string) {
-               ret = cft->write_string(css, cft, strstrip(buf));
-       } else if (cft->write_u64) {
+       if (cft->write_u64) {
                unsigned long long v;
                ret = kstrtoull(buf, 0, &v);
                if (!ret)
@@ -2716,8 +2755,6 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
                ret = kstrtoll(buf, 0, &v);
                if (!ret)
                        ret = cft->write_s64(css, cft, v);
-       } else if (cft->trigger) {
-               ret = cft->trigger(css, (unsigned int)cft->private);
        } else {
                ret = -EINVAL;
        }
@@ -2850,9 +2887,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
                return ret;
        }
 
-       if (cft->seq_show == cgroup_subtree_control_show)
-               cgrp->control_kn = kn;
-       else if (cft->seq_show == cgroup_populated_show)
+       if (cft->seq_show == cgroup_populated_show)
                cgrp->populated_kn = kn;
        return 0;
 }
@@ -3739,7 +3774,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 
        /*
         * We aren't being called from kernfs and there's no guarantee on
-        * @kn->priv's validity.  For this and css_tryget_from_dir(),
+        * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
         * @kn->priv is RCU safe.  Let's do the RCU dancing.
         */
        rcu_read_lock();
@@ -3922,7 +3957,7 @@ static struct cftype cgroup_base_files[] = {
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
-               .write_u64 = cgroup_procs_write,
+               .write = cgroup_procs_write,
                .mode = S_IRUGO | S_IWUSR,
        },
        {
@@ -3950,7 +3985,7 @@ static struct cftype cgroup_base_files[] = {
                .name = "cgroup.subtree_control",
                .flags = CFTYPE_ONLY_ON_DFL,
                .seq_show = cgroup_subtree_control_show,
-               .write_string = cgroup_subtree_control_write,
+               .write = cgroup_subtree_control_write,
        },
        {
                .name = "cgroup.populated",
@@ -3971,7 +4006,7 @@ static struct cftype cgroup_base_files[] = {
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
-               .write_u64 = cgroup_tasks_write,
+               .write = cgroup_tasks_write,
                .mode = S_IRUGO | S_IWUSR,
        },
        {
@@ -3984,7 +4019,7 @@ static struct cftype cgroup_base_files[] = {
                .name = "release_agent",
                .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
-               .write_string = cgroup_release_agent_write,
+               .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
@@ -4028,9 +4063,9 @@ err:
  *    Implemented in kill_css().
  *
  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
- *    and thus css_tryget() is guaranteed to fail, the css can be offlined
- *    by invoking offline_css().  After offlining, the base ref is put.
- *    Implemented in css_killed_work_fn().
+ *    and thus css_tryget_online() is guaranteed to fail, the css can be
+ *    offlined by invoking offline_css().  After offlining, the base ref is
+ *    put.  Implemented in css_killed_work_fn().
  *
  * 3. When the percpu_ref reaches zero, the only possible remaining
  *    accessors are inside RCU read sections.  css_release() schedules the
@@ -4069,22 +4104,28 @@ static void css_release(struct percpu_ref *ref)
 {
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
+       struct cgroup_subsys *ss = css->ss;
+
+       cgroup_idr_remove(&ss->css_idr, css->id);
 
-       RCU_INIT_POINTER(css->cgroup->subsys[css->ss->id], NULL);
        call_rcu(&css->rcu_head, css_free_rcu_fn);
 }
 
-static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
-                    struct cgroup *cgrp)
+static void init_and_link_css(struct cgroup_subsys_state *css,
+                             struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
+       cgroup_get(cgrp);
+
        css->cgroup = cgrp;
        css->ss = ss;
        css->flags = 0;
 
-       if (cgrp->parent)
+       if (cgrp->parent) {
                css->parent = cgroup_css(cgrp->parent, ss);
-       else
+               css_get(css->parent);
+       } else {
                css->flags |= CSS_ROOT;
+       }
 
        BUG_ON(cgroup_css(cgrp, ss));
 }
@@ -4150,23 +4191,28 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
        if (IS_ERR(css))
                return PTR_ERR(css);
 
+       init_and_link_css(css, ss, cgrp);
+
        err = percpu_ref_init(&css->refcnt, css_release);
        if (err)
                goto err_free_css;
 
-       init_css(css, ss, cgrp);
+       err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_NOWAIT);
+       if (err < 0)
+               goto err_free_percpu_ref;
+       css->id = err;
 
        err = cgroup_populate_dir(cgrp, 1 << ss->id);
        if (err)
-               goto err_free_percpu_ref;
+               goto err_free_id;
+
+       /* @css is ready to be brought online now, make it visible */
+       cgroup_idr_replace(&ss->css_idr, css, css->id);
 
        err = online_css(css);
        if (err)
                goto err_clear_dir;
 
-       cgroup_get(cgrp);
-       css_get(css->parent);
-
        if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
            parent->parent) {
                pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4180,10 +4226,12 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
 err_clear_dir:
        cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
+err_free_id:
+       cgroup_idr_remove(&ss->css_idr, css->id);
 err_free_percpu_ref:
        percpu_ref_cancel_init(&css->refcnt);
 err_free_css:
-       ss->css_free(css);
+       call_rcu(&css->rcu_head, css_free_rcu_fn);
        return err;
 }
 
@@ -4198,15 +4246,10 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 {
        struct cgroup *cgrp;
        struct cgroup_root *root = parent->root;
-       int ssid, err;
+       int ssid, ret;
        struct cgroup_subsys *ss;
        struct kernfs_node *kn;
 
-       /* allocate the cgroup and its ID, 0 is reserved for the root */
-       cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
-       if (!cgrp)
-               return -ENOMEM;
-
        mutex_lock(&cgroup_tree_mutex);
 
        /*
@@ -4217,25 +4260,32 @@ static long cgroup_create(struct cgroup *parent, const char *name,
         * don't get nasty surprises if we ever grow another caller.
         */
        if (!cgroup_lock_live_group(parent)) {
-               err = -ENODEV;
-               goto err_unlock_tree;
+               ret = -ENODEV;
+               goto out_unlock_tree;
+       }
+
+       /* allocate the cgroup and its ID, 0 is reserved for the root */
+       cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
+       if (!cgrp) {
+               ret = -ENOMEM;
+               goto out_unlock;
        }
 
        /*
         * Temporarily set the pointer to NULL, so idr_find() won't return
         * a half-baked cgroup.
         */
-       cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+       cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
        if (cgrp->id < 0) {
-               err = -ENOMEM;
-               goto err_unlock;
+               ret = -ENOMEM;
+               goto out_free_cgrp;
        }
 
        init_cgroup_housekeeping(cgrp);
 
        cgrp->parent = parent;
        cgrp->dummy_css.parent = &parent->dummy_css;
-       cgrp->root = parent->root;
+       cgrp->root = root;
 
        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -4246,8 +4296,8 @@ static long cgroup_create(struct cgroup *parent, const char *name,
        /* create the directory */
        kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
        if (IS_ERR(kn)) {
-               err = PTR_ERR(kn);
-               goto err_free_id;
+               ret = PTR_ERR(kn);
+               goto out_free_id;
        }
        cgrp->kn = kn;
 
@@ -4268,22 +4318,22 @@ static long cgroup_create(struct cgroup *parent, const char *name,
         * @cgrp is now fully operational.  If something fails after this
         * point, it'll be released via the normal destruction path.
         */
-       idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
+       cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
 
-       err = cgroup_kn_set_ugid(kn);
-       if (err)
-               goto err_destroy;
+       ret = cgroup_kn_set_ugid(kn);
+       if (ret)
+               goto out_destroy;
 
-       err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
-       if (err)
-               goto err_destroy;
+       ret = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+       if (ret)
+               goto out_destroy;
 
        /* let's create and online css's */
        for_each_subsys(ss, ssid) {
                if (parent->child_subsys_mask & (1 << ssid)) {
-                       err = create_css(cgrp, ss);
-                       if (err)
-                               goto err_destroy;
+                       ret = create_css(cgrp, ss);
+                       if (ret)
+                               goto out_destroy;
                }
        }
 
@@ -4296,25 +4346,22 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 
        kernfs_activate(kn);
 
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
-
-       return 0;
+       ret = 0;
+       goto out_unlock;
 
-err_free_id:
-       idr_remove(&root->cgroup_idr, cgrp->id);
-err_unlock:
+out_free_id:
+       cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
+out_free_cgrp:
+       kfree(cgrp);
+out_unlock:
        mutex_unlock(&cgroup_mutex);
-err_unlock_tree:
+out_unlock_tree:
        mutex_unlock(&cgroup_tree_mutex);
-       kfree(cgrp);
-       return err;
+       return ret;
 
-err_destroy:
+out_destroy:
        cgroup_destroy_locked(cgrp);
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
-       return err;
+       goto out_unlock;
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
@@ -4341,7 +4388,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 
 /*
  * This is called when the refcnt of a css is confirmed to be killed.
- * css_tryget() is now guaranteed to fail.
+ * css_tryget_online() is now guaranteed to fail.
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
@@ -4353,8 +4400,8 @@ static void css_killed_work_fn(struct work_struct *work)
        mutex_lock(&cgroup_mutex);
 
        /*
-        * css_tryget() is guaranteed to fail now.  Tell subsystems to
-        * initate destruction.
+        * css_tryget_online() is guaranteed to fail now.  Tell subsystems
+        * to initiate destruction.
         */
        offline_css(css);
 
@@ -4395,8 +4442,8 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
  *
  * This function initiates destruction of @css by removing cgroup interface
  * files and putting its base reference.  ->css_offline() will be invoked
- * asynchronously once css_tryget() is guaranteed to fail and when the
- * reference count reaches zero, @css will be released.
+ * asynchronously once css_tryget_online() is guaranteed to fail and when
+ * the reference count reaches zero, @css will be released.
  */
 static void kill_css(struct cgroup_subsys_state *css)
 {
@@ -4417,7 +4464,7 @@ static void kill_css(struct cgroup_subsys_state *css)
        /*
         * cgroup core guarantees that, by the time ->css_offline() is
         * invoked, no new css reference will be given out via
-        * css_tryget().  We can't simply call percpu_ref_kill() and
+        * css_tryget_online().  We can't simply call percpu_ref_kill() and
         * proceed to offlining css's because percpu_ref_kill() doesn't
         * guarantee that the ref is seen as killed on all CPUs on return.
         *
@@ -4433,9 +4480,9 @@ static void kill_css(struct cgroup_subsys_state *css)
  *
  * css's make use of percpu refcnts whose killing latency shouldn't be
  * exposed to userland and are RCU protected.  Also, cgroup core needs to
- * guarantee that css_tryget() won't succeed by the time ->css_offline() is
- * invoked.  To satisfy all the requirements, destruction is implemented in
- * the following two steps.
+ * guarantee that css_tryget_online() won't succeed by the time
+ * ->css_offline() is invoked.  To satisfy all the requirements,
+ * destruction is implemented in the following two steps.
  *
  * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
  *     userland visible parts and start killing the percpu refcnts of
@@ -4529,9 +4576,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        /*
         * There are two control paths which try to determine cgroup from
         * dentry without going through kernfs - cgroupstats_build() and
-        * css_tryget_from_dir().  Those are supported by RCU protecting
-        * clearing of cgrp->kn->priv backpointer, which should happen
-        * after all files under it have been removed.
+        * css_tryget_online_from_dir().  Those are supported by RCU
+        * protecting clearing of cgrp->kn->priv backpointer, which should
+        * happen after all files under it have been removed.
         */
        kernfs_remove(cgrp->kn);        /* @cgrp has an extra ref on its kn */
        RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
@@ -4543,7 +4590,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 
 /**
  * cgroup_destroy_css_killed - the second step of cgroup destruction
- * @work: cgroup->destroy_free_work
+ * @cgrp: the cgroup whose csses have just finished offlining
  *
  * This function is invoked from a work item for a cgroup which is being
  * destroyed after all css's are offlined and performs the rest of
@@ -4606,7 +4653,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
        .rename                 = cgroup_rename,
 };
 
-static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
+static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 {
        struct cgroup_subsys_state *css;
 
@@ -4615,6 +4662,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        mutex_lock(&cgroup_tree_mutex);
        mutex_lock(&cgroup_mutex);
 
+       idr_init(&ss->css_idr);
        INIT_LIST_HEAD(&ss->cfts);
 
        /* Create the root cgroup state for this subsystem */
@@ -4622,7 +4670,14 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
        css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
        /* We don't handle early failures gracefully */
        BUG_ON(IS_ERR(css));
-       init_css(css, ss, &cgrp_dfl_root.cgrp);
+       init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
+       if (early) {
+               /* idr_alloc() can't be called safely during early init */
+               css->id = 1;
+       } else {
+               css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
+               BUG_ON(css->id < 0);
+       }
 
        /* Update the init_css_set to contain a subsys
         * pointer to this state - since the subsystem is
@@ -4673,7 +4728,7 @@ int __init cgroup_init_early(void)
                ss->name = cgroup_subsys_name[i];
 
                if (ss->early_init)
-                       cgroup_init_subsys(ss);
+                       cgroup_init_subsys(ss, true);
        }
        return 0;
 }
@@ -4705,8 +4760,16 @@ int __init cgroup_init(void)
        mutex_unlock(&cgroup_tree_mutex);
 
        for_each_subsys(ss, ssid) {
-               if (!ss->early_init)
-                       cgroup_init_subsys(ss);
+               if (ss->early_init) {
+                       struct cgroup_subsys_state *css =
+                               init_css_set.subsys[ss->id];
+
+                       css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
+                                                  GFP_KERNEL);
+                       BUG_ON(css->id < 0);
+               } else {
+                       cgroup_init_subsys(ss, false);
+               }
 
                list_add_tail(&init_css_set.e_cset_node[ssid],
                              &cgrp_dfl_root.cgrp.e_csets[ssid]);
@@ -5112,7 +5175,7 @@ static int __init cgroup_disable(char *str)
 __setup("cgroup_disable=", cgroup_disable);
 
 /**
- * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
+ * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
  * @dentry: directory dentry of interest
  * @ss: subsystem of interest
  *
@@ -5120,8 +5183,8 @@ __setup("cgroup_disable=", cgroup_disable);
  * to get the corresponding css and return it.  If such css doesn't exist
  * or can't be pinned, an ERR_PTR value is returned.
  */
-struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
-                                               struct cgroup_subsys *ss)
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+                                                      struct cgroup_subsys *ss)
 {
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup_subsys_state *css = NULL;
@@ -5143,7 +5206,7 @@ struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
        if (cgrp)
                css = cgroup_css(cgrp, ss);
 
-       if (!css || !css_tryget(css))
+       if (!css || !css_tryget_online(css))
                css = ERR_PTR(-ENOENT);
 
        rcu_read_unlock();
@@ -5160,14 +5223,8 @@ struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
  */
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
 {
-       struct cgroup *cgrp;
-
-       cgroup_assert_mutexes_or_rcu_locked();
-
-       cgrp = idr_find(&ss->root->cgroup_idr, id);
-       if (cgrp)
-               return cgroup_css(cgrp, ss);
-       return NULL;
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       return idr_find(&ss->css_idr, id);
 }
 
 #ifdef CONFIG_CGROUP_DEBUG
This page took 0.037432 seconds and 5 git commands to generate.