cgroup: reorganize cgroup_create()
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 07815ef7b1f6eb89655d2e47d136befff02602cc..1d6106c3fb4e22d41b270bc10de15f79450da7e2 100644
@@ -203,9 +203,9 @@ static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
        int ret;
 
        idr_preload(gfp_mask);
-       spin_lock(&cgroup_idr_lock);
+       spin_lock_bh(&cgroup_idr_lock);
        ret = idr_alloc(idr, ptr, start, end, gfp_mask);
-       spin_unlock(&cgroup_idr_lock);
+       spin_unlock_bh(&cgroup_idr_lock);
        idr_preload_end();
        return ret;
 }
@@ -214,17 +214,17 @@ static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
 {
        void *ret;
 
-       spin_lock(&cgroup_idr_lock);
+       spin_lock_bh(&cgroup_idr_lock);
        ret = idr_replace(idr, ptr, id);
-       spin_unlock(&cgroup_idr_lock);
+       spin_unlock_bh(&cgroup_idr_lock);
        return ret;
 }
 
 static void cgroup_idr_remove(struct idr *idr, int id)
 {
-       spin_lock(&cgroup_idr_lock);
+       spin_lock_bh(&cgroup_idr_lock);
        idr_remove(idr, id);
-       spin_unlock(&cgroup_idr_lock);
+       spin_unlock_bh(&cgroup_idr_lock);
 }
 
 /**
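The _bh conversion above suggests cgroup_idr_lock is now also taken from bottom-half context; later in this patch css_release(), a percpu_ref release callback that runs from an RCU softirq, calls cgroup_idr_remove(). A minimal sketch of the resulting locking pattern, with made-up names:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_idr_lock);	/* also taken from BH context */
static DEFINE_IDR(example_idr);

/* process context: disable BHs so a softirq can't deadlock on the lock */
static int example_idr_alloc(void *ptr, gfp_t gfp_mask)
{
	int ret;

	idr_preload(gfp_mask);			/* preallocate outside the lock */
	spin_lock_bh(&example_idr_lock);
	ret = idr_alloc(&example_idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock_bh(&example_idr_lock);
	idr_preload_end();
	return ret;
}

/* softirq context (e.g. an RCU callback): BHs are already disabled here */
static void example_idr_remove_from_bh(int id)
{
	spin_lock(&example_idr_lock);
	idr_remove(&example_idr, id);
	spin_unlock(&example_idr_lock);
}
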
@@ -283,11 +283,10 @@ static inline bool cgroup_is_dead(const struct cgroup *cgrp)
        return test_bit(CGRP_DEAD, &cgrp->flags);
 }
 
-struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
 {
-       struct kernfs_open_file *of = seq->private;
        struct cgroup *cgrp = of->kn->parent->priv;
-       struct cftype *cft = seq_cft(seq);
+       struct cftype *cft = of_cft(of);
 
        /*
         * This is open and unprotected implementation of cgroup_css().
@@ -302,7 +301,7 @@ struct cgroup_subsys_state *seq_css(struct seq_file *seq)
        else
                return &cgrp->dummy_css;
 }
-EXPORT_SYMBOL_GPL(seq_css);
+EXPORT_SYMBOL_GPL(of_css);
 
 /**
  * cgroup_is_descendant - test ancestry
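of_css() above is the write-side counterpart of seq_css(): the seq_file helpers (seq_cft()/seq_css()) resolve a file's cftype and css on the read path, and the kernfs_open_file helpers (of_cft()/of_css()) do the same for raw write handlers; seq_css() presumably survives as a thin wrapper in the header, since it is still used further down. A hypothetical handler pair (not from this patch) showing the symmetry, assuming the helpers are visible via linux/cgroup.h:

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/printk.h>
#include <linux/seq_file.h>

/* read side: seq_file based, resolved through seq_cft()/seq_css() */
static int example_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%s on cgroup %d\n",
		   seq_cft(seq)->name, seq_css(seq)->cgroup->id);
	return 0;
}

/* write side: raw kernfs handler, resolved through of_cft()/of_css() */
static ssize_t example_write(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off)
{
	pr_debug("%s written on cgroup %d\n",
		 of_cft(of)->name, of_css(of)->cgroup->id);
	return nbytes;				/* report the whole buffer as consumed */
}
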
@@ -439,7 +438,7 @@ struct cgrp_cset_link {
  * reference-counted, to improve performance when child cgroups
  * haven't been created.
  */
-static struct css_set init_css_set = {
+struct css_set init_css_set = {
        .refcount               = ATOMIC_INIT(1),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
@@ -1035,8 +1034,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
        if (cft->read_u64 || cft->read_s64 || cft->seq_show)
                mode |= S_IRUGO;
 
-       if (cft->write_u64 || cft->write_s64 || cft->write_string ||
-           cft->trigger)
+       if (cft->write_u64 || cft->write_s64 || cft->write)
                mode |= S_IWUSR;
 
        return mode;
@@ -2234,12 +2232,18 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
  * function to attach either it or all tasks in its threadgroup. Will lock
  * cgroup_mutex and threadgroup.
  */
-static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
+static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
+                                   size_t nbytes, loff_t off, bool threadgroup)
 {
        struct task_struct *tsk;
        const struct cred *cred = current_cred(), *tcred;
+       struct cgroup *cgrp = of_css(of)->cgroup;
+       pid_t pid;
        int ret;
 
+       if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
+               return -EINVAL;
+
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
 
@@ -2307,7 +2311,7 @@ retry_find_task:
        put_task_struct(tsk);
 out_unlock_cgroup:
        mutex_unlock(&cgroup_mutex);
-       return ret;
+       return ret ?: nbytes;
 }
 
 /**
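attach_task_by_pid() becomes __cgroup_procs_write(): the handler now takes the raw kernfs buffer, parses the PID itself with kstrtoint(strstrip(buf), ...), and returns nbytes on success as kernfs write callbacks must. The user-visible interface is unchanged; a minimal user-space example (the mount path is an assumption):

#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *procs = argc > 1 ? argv[1]
				     : "/sys/fs/cgroup/test/cgroup.procs";
	FILE *f = fopen(procs, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* one PID per write; trailing whitespace is stripped by the kernel */
	if (fprintf(f, "%d\n", getpid()) < 0 || fclose(f) == EOF) {
		perror("write cgroup.procs");	/* kernel errors surface here */
		return 1;
	}
	return 0;
}
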
@@ -2341,43 +2345,43 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-static int cgroup_tasks_write(struct cgroup_subsys_state *css,
-                             struct cftype *cft, u64 pid)
+static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
+                                 char *buf, size_t nbytes, loff_t off)
 {
-       return attach_task_by_pid(css->cgroup, pid, false);
+       return __cgroup_procs_write(of, buf, nbytes, off, false);
 }
 
-static int cgroup_procs_write(struct cgroup_subsys_state *css,
-                             struct cftype *cft, u64 tgid)
+static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
+                                 char *buf, size_t nbytes, loff_t off)
 {
-       return attach_task_by_pid(css->cgroup, tgid, true);
+       return __cgroup_procs_write(of, buf, nbytes, off, true);
 }
 
-static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
-                                     struct cftype *cft, char *buffer)
+static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
+                                         char *buf, size_t nbytes, loff_t off)
 {
-       struct cgroup_root *root = css->cgroup->root;
+       struct cgroup *cgrp = of_css(of)->cgroup;
+       struct cgroup_root *root = cgrp->root;
 
        BUILD_BUG_ON(sizeof(root->release_agent_path) < PATH_MAX);
-       if (!cgroup_lock_live_group(css->cgroup))
+       if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
        spin_lock(&release_agent_path_lock);
-       strlcpy(root->release_agent_path, buffer,
+       strlcpy(root->release_agent_path, strstrip(buf),
                sizeof(root->release_agent_path));
        spin_unlock(&release_agent_path_lock);
        mutex_unlock(&cgroup_mutex);
-       return 0;
+       return nbytes;
 }
 
 static int cgroup_release_agent_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
 
-       if (!cgroup_lock_live_group(cgrp))
-               return -ENODEV;
+       spin_lock(&release_agent_path_lock);
        seq_puts(seq, cgrp->root->release_agent_path);
+       spin_unlock(&release_agent_path_lock);
        seq_putc(seq, '\n');
-       mutex_unlock(&cgroup_mutex);
        return 0;
 }
 
@@ -2532,31 +2536,34 @@ out_finish:
 }
 
 /* change the enabled child controllers for a cgroup in the default hierarchy */
-static int cgroup_subtree_control_write(struct cgroup_subsys_state *dummy_css,
-                                       struct cftype *cft, char *buffer)
+static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
+                                           char *buf, size_t nbytes,
+                                           loff_t off)
 {
-       unsigned int enable_req = 0, disable_req = 0, enable, disable;
-       struct cgroup *cgrp = dummy_css->cgroup, *child;
+       unsigned int enable = 0, disable = 0;
+       struct cgroup *cgrp = of_css(of)->cgroup, *child;
        struct cgroup_subsys *ss;
-       char *tok, *p;
+       char *tok;
        int ssid, ret;
 
        /*
-        * Parse input - white space separated list of subsystem names
-        * prefixed with either + or -.
+        * Parse input - space separated list of subsystem names prefixed
+        * with either + or -.
         */
-       p = buffer;
-       while ((tok = strsep(&p, " \t\n"))) {
+       buf = strstrip(buf);
+       while ((tok = strsep(&buf, " "))) {
+               if (tok[0] == '\0')
+                       continue;
                for_each_subsys(ss, ssid) {
                        if (ss->disabled || strcmp(tok + 1, ss->name))
                                continue;
 
                        if (*tok == '+') {
-                               enable_req |= 1 << ssid;
-                               disable_req &= ~(1 << ssid);
+                               enable |= 1 << ssid;
+                               disable &= ~(1 << ssid);
                        } else if (*tok == '-') {
-                               disable_req |= 1 << ssid;
-                               enable_req &= ~(1 << ssid);
+                               disable |= 1 << ssid;
+                               enable &= ~(1 << ssid);
                        } else {
                                return -EINVAL;
                        }
@@ -2573,10 +2580,7 @@ static int cgroup_subtree_control_write(struct cgroup_subsys_state *dummy_css,
         * active_ref protection.
         */
        cgroup_get(cgrp);
-       kernfs_break_active_protection(cgrp->control_kn);
-retry:
-       enable = enable_req;
-       disable = disable_req;
+       kernfs_break_active_protection(of->kn);
 
        mutex_lock(&cgroup_tree_mutex);
 
@@ -2594,17 +2598,21 @@ retry:
                         * cases, wait till it's gone using offline_waitq.
                         */
                        cgroup_for_each_live_child(child, cgrp) {
-                               wait_queue_t wait;
+                               DEFINE_WAIT(wait);
 
                                if (!cgroup_css(child, ss))
                                        continue;
 
+                               cgroup_get(child);
                                prepare_to_wait(&child->offline_waitq, &wait,
                                                TASK_UNINTERRUPTIBLE);
                                mutex_unlock(&cgroup_tree_mutex);
                                schedule();
                                finish_wait(&child->offline_waitq, &wait);
-                               goto retry;
+                               cgroup_put(child);
+
+                               ret = restart_syscall();
+                               goto out_unbreak;
                        }
 
                        /* unavailable or not enabled on the parent? */
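Instead of re-running the loop with a raw goto while holding stale state, the handler now pins the child, sleeps until it goes offline, and has the whole write(2) restarted. A simplified sketch of that wait-then-restart pattern (object and helper names are invented stand-ins for the cgroup ones):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct example_obj {
	atomic_t refcnt;
	wait_queue_head_t offline_waitq;
};

static void example_get(struct example_obj *obj) { atomic_inc(&obj->refcnt); }
static void example_put(struct example_obj *obj) { atomic_dec(&obj->refcnt); }

static int example_wait_then_restart(struct example_obj *obj, struct mutex *lock)
{
	DEFINE_WAIT(wait);			/* on-stack waitqueue entry */

	example_get(obj);			/* keep @obj's memory valid while asleep */
	prepare_to_wait(&obj->offline_waitq, &wait, TASK_UNINTERRUPTIBLE);
	mutex_unlock(lock);			/* never sleep holding the tree mutex */
	schedule();
	finish_wait(&obj->offline_waitq, &wait);
	example_put(obj);

	/* caller unwinds its locks/refs and returns this; the syscall is retried */
	return restart_syscall();
}
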
@@ -2688,9 +2696,10 @@ out_unlock:
        mutex_unlock(&cgroup_mutex);
 out_unlock_tree:
        mutex_unlock(&cgroup_tree_mutex);
-       kernfs_unbreak_active_protection(cgrp->control_kn);
+out_unbreak:
+       kernfs_unbreak_active_protection(of->kn);
        cgroup_put(cgrp);
-       return ret;
+       return ret ?: nbytes;
 
 err_undo_css:
        cgrp->child_subsys_mask &= ~enable;
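On the user-space side cgroup.subtree_control still takes a space separated list of "+controller" / "-controller" tokens; the rewritten parser just strstrip()s the buffer and skips empty tokens instead of also treating tabs and newlines as separators. A small example (path and controller names are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/test/cgroup.subtree_control";
	const char *req = "+memory -cpu\n";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* malformed tokens get -EINVAL; an offlining child triggers a restart */
	if (write(fd, req, strlen(req)) < 0)
		perror("write subtree_control");
	close(fd);
	return 0;
}
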
@@ -2723,6 +2732,9 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
        struct cgroup_subsys_state *css;
        int ret;
 
+       if (cft->write)
+               return cft->write(of, buf, nbytes, off);
+
        /*
         * kernfs guarantees that a file isn't deleted with operations in
         * flight, which means that the matching css is and stays alive and
@@ -2733,9 +2745,7 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
        css = cgroup_css(cgrp, cft->ss);
        rcu_read_unlock();
 
-       if (cft->write_string) {
-               ret = cft->write_string(css, cft, strstrip(buf));
-       } else if (cft->write_u64) {
+       if (cft->write_u64) {
                unsigned long long v;
                ret = kstrtoull(buf, 0, &v);
                if (!ret)
@@ -2745,8 +2755,6 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
                ret = kstrtoll(buf, 0, &v);
                if (!ret)
                        ret = cft->write_s64(css, cft, v);
-       } else if (cft->trigger) {
-               ret = cft->trigger(css, (unsigned int)cft->private);
        } else {
                ret = -EINVAL;
        }
@@ -2879,9 +2887,7 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
                return ret;
        }
 
-       if (cft->seq_show == cgroup_subtree_control_show)
-               cgrp->control_kn = kn;
-       else if (cft->seq_show == cgroup_populated_show)
+       if (cft->seq_show == cgroup_populated_show)
                cgrp->populated_kn = kn;
        return 0;
 }
@@ -3768,7 +3774,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
 
        /*
         * We aren't being called from kernfs and there's no guarantee on
-        * @kn->priv's validity.  For this and css_tryget_from_dir(),
+        * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
         * @kn->priv is RCU safe.  Let's do the RCU dancing.
         */
        rcu_read_lock();
@@ -3951,7 +3957,7 @@ static struct cftype cgroup_base_files[] = {
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_PROCS,
-               .write_u64 = cgroup_procs_write,
+               .write = cgroup_procs_write,
                .mode = S_IRUGO | S_IWUSR,
        },
        {
@@ -3979,7 +3985,7 @@ static struct cftype cgroup_base_files[] = {
                .name = "cgroup.subtree_control",
                .flags = CFTYPE_ONLY_ON_DFL,
                .seq_show = cgroup_subtree_control_show,
-               .write_string = cgroup_subtree_control_write,
+               .write = cgroup_subtree_control_write,
        },
        {
                .name = "cgroup.populated",
@@ -4000,7 +4006,7 @@ static struct cftype cgroup_base_files[] = {
                .seq_stop = cgroup_pidlist_stop,
                .seq_show = cgroup_pidlist_show,
                .private = CGROUP_FILE_TASKS,
-               .write_u64 = cgroup_tasks_write,
+               .write = cgroup_tasks_write,
                .mode = S_IRUGO | S_IWUSR,
        },
        {
@@ -4013,7 +4019,7 @@ static struct cftype cgroup_base_files[] = {
                .name = "release_agent",
                .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
-               .write_string = cgroup_release_agent_write,
+               .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
        },
        { }     /* terminate */
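All of the base files above now go through the single .write method. A controller converting its own .write_string (or .trigger) file does roughly the following; example_set_path() and the file name are invented:

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/limits.h>
#include <linux/string.h>

/* hypothetical helper standing in for whatever the old handler did */
static int example_set_path(struct cgroup_subsys_state *css, const char *path)
{
	return 0;
}

static ssize_t example_path_write(struct kernfs_open_file *of, char *buf,
				  size_t nbytes, loff_t off)
{
	int ret;

	/* the handler now strips and parses the raw buffer itself */
	ret = example_set_path(of_css(of), strstrip(buf));
	return ret ?: nbytes;			/* 0 becomes "whole buffer consumed" */
}

static struct cftype example_files[] = {
	{
		.name = "example.path",
		.write = example_path_write,	/* was: .write_string = ... */
		.max_write_len = PATH_MAX - 1,
	},
	{ }	/* terminate */
};
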
@@ -4057,9 +4063,9 @@ err:
  *    Implemented in kill_css().
  *
  * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
- *    and thus css_tryget() is guaranteed to fail, the css can be offlined
- *    by invoking offline_css().  After offlining, the base ref is put.
- *    Implemented in css_killed_work_fn().
+ *    and thus css_tryget_online() is guaranteed to fail, the css can be
+ *    offlined by invoking offline_css().  After offlining, the base ref is
+ *    put.  Implemented in css_killed_work_fn().
  *
  * 3. When the percpu_ref reaches zero, the only possible remaining
  *    accessors are inside RCU read sections.  css_release() schedules the
@@ -4100,7 +4106,6 @@ static void css_release(struct percpu_ref *ref)
                container_of(ref, struct cgroup_subsys_state, refcnt);
        struct cgroup_subsys *ss = css->ss;
 
-       RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
        cgroup_idr_remove(&ss->css_idr, css->id);
 
        call_rcu(&css->rcu_head, css_free_rcu_fn);
@@ -4241,15 +4246,10 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 {
        struct cgroup *cgrp;
        struct cgroup_root *root = parent->root;
-       int ssid, err;
+       int ssid, ret;
        struct cgroup_subsys *ss;
        struct kernfs_node *kn;
 
-       /* allocate the cgroup and its ID, 0 is reserved for the root */
-       cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
-       if (!cgrp)
-               return -ENOMEM;
-
        mutex_lock(&cgroup_tree_mutex);
 
        /*
@@ -4260,8 +4260,15 @@ static long cgroup_create(struct cgroup *parent, const char *name,
         * don't get nasty surprises if we ever grow another caller.
         */
        if (!cgroup_lock_live_group(parent)) {
-               err = -ENODEV;
-               goto err_unlock_tree;
+               ret = -ENODEV;
+               goto out_unlock_tree;
+       }
+
+       /* allocate the cgroup and its ID, 0 is reserved for the root */
+       cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
+       if (!cgrp) {
+               ret = -ENOMEM;
+               goto out_unlock;
        }
 
        /*
@@ -4270,15 +4277,15 @@ static long cgroup_create(struct cgroup *parent, const char *name,
         */
        cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_NOWAIT);
        if (cgrp->id < 0) {
-               err = -ENOMEM;
-               goto err_unlock;
+               ret = -ENOMEM;
+               goto out_free_cgrp;
        }
 
        init_cgroup_housekeeping(cgrp);
 
        cgrp->parent = parent;
        cgrp->dummy_css.parent = &parent->dummy_css;
-       cgrp->root = parent->root;
+       cgrp->root = root;
 
        if (notify_on_release(parent))
                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -4289,8 +4296,8 @@ static long cgroup_create(struct cgroup *parent, const char *name,
        /* create the directory */
        kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
        if (IS_ERR(kn)) {
-               err = PTR_ERR(kn);
-               goto err_free_id;
+               ret = PTR_ERR(kn);
+               goto out_free_id;
        }
        cgrp->kn = kn;
 
@@ -4313,20 +4320,20 @@ static long cgroup_create(struct cgroup *parent, const char *name,
         */
        cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
 
-       err = cgroup_kn_set_ugid(kn);
-       if (err)
-               goto err_destroy;
+       ret = cgroup_kn_set_ugid(kn);
+       if (ret)
+               goto out_destroy;
 
-       err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
-       if (err)
-               goto err_destroy;
+       ret = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+       if (ret)
+               goto out_destroy;
 
        /* let's create and online css's */
        for_each_subsys(ss, ssid) {
                if (parent->child_subsys_mask & (1 << ssid)) {
-                       err = create_css(cgrp, ss);
-                       if (err)
-                               goto err_destroy;
+                       ret = create_css(cgrp, ss);
+                       if (ret)
+                               goto out_destroy;
                }
        }
 
@@ -4339,25 +4346,22 @@ static long cgroup_create(struct cgroup *parent, const char *name,
 
        kernfs_activate(kn);
 
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
-
-       return 0;
+       ret = 0;
+       goto out_unlock;
 
-err_free_id:
+out_free_id:
        cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
-err_unlock:
+out_free_cgrp:
+       kfree(cgrp);
+out_unlock:
        mutex_unlock(&cgroup_mutex);
-err_unlock_tree:
+out_unlock_tree:
        mutex_unlock(&cgroup_tree_mutex);
-       kfree(cgrp);
-       return err;
+       return ret;
 
-err_destroy:
+out_destroy:
        cgroup_destroy_locked(cgrp);
-       mutex_unlock(&cgroup_mutex);
-       mutex_unlock(&cgroup_tree_mutex);
-       return err;
+       goto out_unlock;
 }
 
 static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
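The reorganization the subject line refers to: the cgroup is now allocated after cgroup_lock_live_group(), the error labels collapse into a single out_* ladder that unwinds in reverse order (including the kfree), and the success path exits through the same ladder instead of duplicating the unlocks. A generic sketch of the resulting shape (names are illustrative, not the real function):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct example { int id; };

static DEFINE_MUTEX(example_tree_mutex);
static DEFINE_MUTEX(example_mutex);
static struct example *example_live;		/* stands in for linking into the hierarchy */

static int example_register(struct example *obj)
{
	example_live = obj;			/* hypothetical "make it visible" step */
	return 0;
}

static int example_create(void)
{
	struct example *obj;
	int ret;

	mutex_lock(&example_tree_mutex);
	mutex_lock(&example_mutex);

	/* allocate only once the parent is known to be alive and locked */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = example_register(obj);
	if (ret)
		goto out_free;

	ret = 0;
	goto out_unlock;			/* success uses the same exit ladder */

out_free:
	kfree(obj);
out_unlock:
	mutex_unlock(&example_mutex);
	mutex_unlock(&example_tree_mutex);
	return ret;
}
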
@@ -4384,7 +4388,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 
 /*
  * This is called when the refcnt of a css is confirmed to be killed.
- * css_tryget() is now guaranteed to fail.
+ * css_tryget_online() is now guaranteed to fail.
  */
 static void css_killed_work_fn(struct work_struct *work)
 {
@@ -4396,8 +4400,8 @@ static void css_killed_work_fn(struct work_struct *work)
        mutex_lock(&cgroup_mutex);
 
        /*
-        * css_tryget() is guaranteed to fail now.  Tell subsystems to
-        * initate destruction.
+        * css_tryget_online() is guaranteed to fail now.  Tell subsystems
+        * to initate destruction.
         */
        offline_css(css);
 
@@ -4438,8 +4442,8 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
  *
  * This function initiates destruction of @css by removing cgroup interface
  * files and putting its base reference.  ->css_offline() will be invoked
- * asynchronously once css_tryget() is guaranteed to fail and when the
- * reference count reaches zero, @css will be released.
+ * asynchronously once css_tryget_online() is guaranteed to fail and when
+ * the reference count reaches zero, @css will be released.
  */
 static void kill_css(struct cgroup_subsys_state *css)
 {
@@ -4460,7 +4464,7 @@ static void kill_css(struct cgroup_subsys_state *css)
        /*
         * cgroup core guarantees that, by the time ->css_offline() is
         * invoked, no new css reference will be given out via
-        * css_tryget().  We can't simply call percpu_ref_kill() and
+        * css_tryget_online().  We can't simply call percpu_ref_kill() and
         * proceed to offlining css's because percpu_ref_kill() doesn't
         * guarantee that the ref is seen as killed on all CPUs on return.
         *
@@ -4476,9 +4480,9 @@ static void kill_css(struct cgroup_subsys_state *css)
  *
  * css's make use of percpu refcnts whose killing latency shouldn't be
  * exposed to userland and are RCU protected.  Also, cgroup core needs to
- * guarantee that css_tryget() won't succeed by the time ->css_offline() is
- * invoked.  To satisfy all the requirements, destruction is implemented in
- * the following two steps.
+ * guarantee that css_tryget_online() won't succeed by the time
+ * ->css_offline() is invoked.  To satisfy all the requirements,
+ * destruction is implemented in the following two steps.
  *
  * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
  *     userland visible parts and start killing the percpu refcnts of
@@ -4572,9 +4576,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        /*
         * There are two control paths which try to determine cgroup from
         * dentry without going through kernfs - cgroupstats_build() and
-        * css_tryget_from_dir().  Those are supported by RCU protecting
-        * clearing of cgrp->kn->priv backpointer, which should happen
-        * after all files under it have been removed.
+        * css_tryget_online_from_dir().  Those are supported by RCU
+        * protecting clearing of cgrp->kn->priv backpointer, which should
+        * happen after all files under it have been removed.
         */
        kernfs_remove(cgrp->kn);        /* @cgrp has an extra ref on its kn */
        RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL);
@@ -5171,7 +5175,7 @@ static int __init cgroup_disable(char *str)
 __setup("cgroup_disable=", cgroup_disable);
 
 /**
- * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
+ * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
  * @dentry: directory dentry of interest
  * @ss: subsystem of interest
  *
@@ -5179,8 +5183,8 @@ __setup("cgroup_disable=", cgroup_disable);
  * to get the corresponding css and return it.  If such css doesn't exist
  * or can't be pinned, an ERR_PTR value is returned.
  */
-struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
-                                               struct cgroup_subsys *ss)
+struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
+                                                      struct cgroup_subsys *ss)
 {
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct cgroup_subsys_state *css = NULL;
@@ -5202,7 +5206,7 @@ struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
        if (cgrp)
                css = cgroup_css(cgrp, ss);
 
-       if (!css || !css_tryget(css))
+       if (!css || !css_tryget_online(css))
                css = ERR_PTR(-ENOENT);
 
        rcu_read_unlock();