cgroup: make hierarchy iterators deal with cgroup_subsys_state instead of cgroup
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 583f8f66a7e1b510141716b2c5d85c68f115ebf5..91eac33fac86f7d2a78332bdca6c130ce555d805 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1365,6 +1365,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
        INIT_LIST_HEAD(&cgrp->release_list);
        INIT_LIST_HEAD(&cgrp->pidlists);
        mutex_init(&cgrp->pidlist_mutex);
+       cgrp->dummy_css.cgroup = cgrp;
        INIT_LIST_HEAD(&cgrp->event_list);
        spin_lock_init(&cgrp->event_list_lock);
        simple_xattrs_init(&cgrp->xattrs);
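For context, the dummy_css.cgroup initialization relies on the embedded dummy css that the companion include/linux/cgroup.h change in this series adds to struct cgroup; a sketch of the assumed field (not part of this file):

struct cgroup {
	/* ... existing fields ... */

	/*
	 * Dummy css with NULL ->ss, used by cgroup core files that are
	 * not bound to any subsystem; ->cgroup points back at this cgroup.
	 */
	struct cgroup_subsys_state dummy_css;
};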
@@ -2234,34 +2235,38 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 }
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
-static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+static int cgroup_tasks_write(struct cgroup_subsys_state *css,
+                             struct cftype *cft, u64 pid)
 {
-       return attach_task_by_pid(cgrp, pid, false);
+       return attach_task_by_pid(css->cgroup, pid, false);
 }
 
-static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
+static int cgroup_procs_write(struct cgroup_subsys_state *css,
+                             struct cftype *cft, u64 tgid)
 {
-       return attach_task_by_pid(cgrp, tgid, true);
+       return attach_task_by_pid(css->cgroup, tgid, true);
 }
 
-static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
-                                     const char *buffer)
+static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
+                                     struct cftype *cft, const char *buffer)
 {
-       BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+       BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
        if (strlen(buffer) >= PATH_MAX)
                return -EINVAL;
-       if (!cgroup_lock_live_group(cgrp))
+       if (!cgroup_lock_live_group(css->cgroup))
                return -ENODEV;
        mutex_lock(&cgroup_root_mutex);
-       strcpy(cgrp->root->release_agent_path, buffer);
+       strcpy(css->cgroup->root->release_agent_path, buffer);
        mutex_unlock(&cgroup_root_mutex);
        mutex_unlock(&cgroup_mutex);
        return 0;
 }
 
-static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
-                                    struct seq_file *seq)
+static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
+                                    struct cftype *cft, struct seq_file *seq)
 {
+       struct cgroup *cgrp = css->cgroup;
+
        if (!cgroup_lock_live_group(cgrp))
                return -ENODEV;
        seq_puts(seq, cgrp->root->release_agent_path);
@@ -2270,10 +2275,10 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
        return 0;
 }
 
-static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
-                                    struct seq_file *seq)
+static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
+                                    struct cftype *cft, struct seq_file *seq)
 {
-       seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
+       seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
        return 0;
 }
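For controller authors, a hedged sketch of what the new css-taking signatures look like on the subsystem side (my_cgrp_state, my_state() and my_weight_read() are hypothetical names, not from this patch); the css is the subsystem's own state, so the usual container_of() pattern applies directly:

struct my_cgrp_state {
	struct cgroup_subsys_state css;
	u64 weight;
};

static inline struct my_cgrp_state *my_state(struct cgroup_subsys_state *css)
{
	return container_of(css, struct my_cgrp_state, css);
}

static u64 my_weight_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	/* no more cgroup -> css lookup; the css is passed in directly */
	return my_state(css)->weight;
}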
 
@@ -2285,16 +2290,16 @@ static struct cgroup_subsys_state *cgroup_file_css(struct cfent *cfe)
 
        if (cft->ss)
                return cgrp->subsys[cft->ss->subsys_id];
-       return NULL;
+       return &cgrp->dummy_css;
 }
 
 /* A buffer size big enough for numbers or short strings */
 #define CGROUP_LOCAL_BUFFER_SIZE 64
 
-static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
-                               struct file *file,
-                               const char __user *userbuf,
-                               size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
+                               struct cftype *cft, struct file *file,
+                               const char __user *userbuf, size_t nbytes,
+                               loff_t *unused_ppos)
 {
        char buffer[CGROUP_LOCAL_BUFFER_SIZE];
        int retval = 0;
@@ -2312,22 +2317,22 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
                u64 val = simple_strtoull(strstrip(buffer), &end, 0);
                if (*end)
                        return -EINVAL;
-               retval = cft->write_u64(cgrp, cft, val);
+               retval = cft->write_u64(css, cft, val);
        } else {
                s64 val = simple_strtoll(strstrip(buffer), &end, 0);
                if (*end)
                        return -EINVAL;
-               retval = cft->write_s64(cgrp, cft, val);
+               retval = cft->write_s64(css, cft, val);
        }
        if (!retval)
                retval = nbytes;
        return retval;
 }
 
-static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
-                                  struct file *file,
-                                  const char __user *userbuf,
-                                  size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
+                                  struct cftype *cft, struct file *file,
+                                  const char __user *userbuf, size_t nbytes,
+                                  loff_t *unused_ppos)
 {
        char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
        int retval = 0;
@@ -2350,7 +2355,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
        }
 
        buffer[nbytes] = 0;     /* nul-terminate */
-       retval = cft->write_string(cgrp, cft, strstrip(buffer));
+       retval = cft->write_string(css, cft, strstrip(buffer));
        if (!retval)
                retval = nbytes;
 out:
@@ -2360,60 +2365,60 @@ out:
 }
 
 static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
-                                               size_t nbytes, loff_t *ppos)
+                                size_t nbytes, loff_t *ppos)
 {
+       struct cfent *cfe = __d_cfe(file->f_dentry);
        struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+       struct cgroup_subsys_state *css = cgroup_file_css(cfe);
 
        if (cft->write)
-               return cft->write(cgrp, cft, file, buf, nbytes, ppos);
+               return cft->write(css, cft, file, buf, nbytes, ppos);
        if (cft->write_u64 || cft->write_s64)
-               return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
        if (cft->write_string)
-               return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
        if (cft->trigger) {
-               int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+               int ret = cft->trigger(css, (unsigned int)cft->private);
                return ret ? ret : nbytes;
        }
        return -EINVAL;
 }
 
-static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
-                              struct file *file,
-                              char __user *buf, size_t nbytes,
-                              loff_t *ppos)
+static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct file *file,
+                              char __user *buf, size_t nbytes, loff_t *ppos)
 {
        char tmp[CGROUP_LOCAL_BUFFER_SIZE];
-       u64 val = cft->read_u64(cgrp, cft);
+       u64 val = cft->read_u64(css, cft);
        int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
 
        return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
-static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
-                              struct file *file,
-                              char __user *buf, size_t nbytes,
-                              loff_t *ppos)
+static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
+                              struct cftype *cft, struct file *file,
+                              char __user *buf, size_t nbytes, loff_t *ppos)
 {
        char tmp[CGROUP_LOCAL_BUFFER_SIZE];
-       s64 val = cft->read_s64(cgrp, cft);
+       s64 val = cft->read_s64(css, cft);
        int len = sprintf(tmp, "%lld\n", (long long) val);
 
        return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
-                                  size_t nbytes, loff_t *ppos)
+                               size_t nbytes, loff_t *ppos)
 {
+       struct cfent *cfe = __d_cfe(file->f_dentry);
        struct cftype *cft = __d_cft(file->f_dentry);
-       struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+       struct cgroup_subsys_state *css = cgroup_file_css(cfe);
 
        if (cft->read)
-               return cft->read(cgrp, cft, file, buf, nbytes, ppos);
+               return cft->read(css, cft, file, buf, nbytes, ppos);
        if (cft->read_u64)
-               return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
        if (cft->read_s64)
-               return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
+               return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
        return -EINVAL;
 }
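Continuing the hypothetical my_weight example, a minimal sketch of the cftype wiring that makes cgroup_file_read()/cgroup_file_write() above dispatch to such handlers (names are illustrative; .read_u64/.write_u64 are the existing cftype callbacks, now taking a css):

static int my_weight_write(struct cgroup_subsys_state *css, struct cftype *cft,
			   u64 val)
{
	if (!val || val > 1000)
		return -EINVAL;
	my_state(css)->weight = val;
	return 0;
}

static struct cftype my_files[] = {
	{
		.name = "weight",
		.read_u64 = my_weight_read,
		.write_u64 = my_weight_write,
	},
	{ }	/* terminate */
};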
 
@@ -2432,16 +2437,16 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
 {
        struct cfent *cfe = m->private;
        struct cftype *cft = cfe->type;
-       struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
+       struct cgroup_subsys_state *css = cgroup_file_css(cfe);
 
        if (cft->read_map) {
                struct cgroup_map_cb cb = {
                        .fill = cgroup_map_add,
                        .state = m,
                };
-               return cft->read_map(cgrp, cft, &cb);
+               return cft->read_map(css, cft, &cb);
        }
-       return cft->read_seq_string(cgrp, cft, m);
+       return cft->read_seq_string(css, cft, m);
 }
 
 static const struct file_operations cgroup_seqfile_operations = {
@@ -2467,7 +2472,7 @@ static int cgroup_file_open(struct inode *inode, struct file *file)
         * unpinned either on open failure or release.  This ensures that
         * @css stays alive for all file operations.
         */
-       if (css && !css_tryget(css))
+       if (css->ss && !css_tryget(css))
                return -ENODEV;
 
        if (cft->read_map || cft->read_seq_string) {
@@ -2477,7 +2482,7 @@ static int cgroup_file_open(struct inode *inode, struct file *file)
                err = cft->open(inode, file);
        }
 
-       if (css && err)
+       if (css->ss && err)
                css_put(css);
        return err;
 }
@@ -2491,7 +2496,7 @@ static int cgroup_file_release(struct inode *inode, struct file *file)
 
        if (cft->release)
                ret = cft->release(inode, file);
-       if (css)
+       if (css->ss)
                css_put(css);
        return ret;
 }
@@ -2809,8 +2814,8 @@ static void cgroup_cfts_prepare(void)
        /*
         * Thanks to the entanglement with vfs inode locking, we can't walk
         * the existing cgroups under cgroup_mutex and create files.
-        * Instead, we use cgroup_for_each_descendant_pre() and drop RCU
-        * read lock before calling cgroup_addrm_files().
+        * Instead, we use css_for_each_descendant_pre() and drop RCU read
+        * lock before calling cgroup_addrm_files().
         */
        mutex_lock(&cgroup_mutex);
 }
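A sketch of the iterate-then-drop pattern the comment above refers to (caller-side code under stated assumptions: root_css is the subsystem's root css, do_blocking_work() is hypothetical, and pos->cgroup is pinned by other means, e.g. a held dentry, before the RCU read lock is dropped). Because css_next_child() below can resume even from a removed position, the walk may continue after re-acquiring the lock:

struct cgroup_subsys_state *pos;

rcu_read_lock();
css_for_each_descendant_pre(pos, root_css) {
	struct cgroup *cgrp = pos->cgroup;

	if (cgroup_is_dead(cgrp))
		continue;

	/* pin @cgrp (e.g. dget() its dentry) before leaving the RCU section */
	rcu_read_unlock();
	do_blocking_work(cgrp);		/* may sleep */
	rcu_read_lock();
}
rcu_read_unlock();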
@@ -2820,10 +2825,11 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 {
        LIST_HEAD(pending);
        struct cgroup_subsys *ss = cfts[0].ss;
-       struct cgroup *cgrp, *root = &ss->root->top_cgroup;
+       struct cgroup *root = &ss->root->top_cgroup;
        struct super_block *sb = ss->root->sb;
        struct dentry *prev = NULL;
        struct inode *inode;
+       struct cgroup_subsys_state *css;
        u64 update_before;
        int ret = 0;
 
@@ -2856,7 +2862,9 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 
        /* add/rm files for all cgroups created before */
        rcu_read_lock();
-       cgroup_for_each_descendant_pre(cgrp, root) {
+       css_for_each_descendant_pre(css, cgroup_css(root, ss->subsys_id)) {
+               struct cgroup *cgrp = css->cgroup;
+
                if (cgroup_is_dead(cgrp))
                        continue;
 
@@ -3032,16 +3040,21 @@ static void cgroup_enable_task_cg_lists(void)
 }
 
 /**
- * cgroup_next_sibling - find the next sibling of a given cgroup
- * @pos: the current cgroup
+ * css_next_child - find the next child of a given css
+ * @pos_css: the current position (%NULL to initiate traversal)
+ * @parent_css: css whose children to walk
  *
- * This function returns the next sibling of @pos and should be called
- * under RCU read lock.  The only requirement is that @pos is accessible.
- * The next sibling is guaranteed to be returned regardless of @pos's
- * state.
+ * This function returns the next child of @parent_css and should be called
+ * under RCU read lock.  The only requirement is that @parent_css and
+ * @pos_css are accessible.  The next child is guaranteed to be returned
+ * regardless of their states.
  */
-struct cgroup *cgroup_next_sibling(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_next_child(struct cgroup_subsys_state *pos_css,
+              struct cgroup_subsys_state *parent_css)
 {
+       struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
+       struct cgroup *cgrp = parent_css->cgroup;
        struct cgroup *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3056,78 +3069,83 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos)
         * safe to dereference from this RCU critical section.  If
         * ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
         * to be visible as %true here.
+        *
+        * If @pos is dead, its next pointer can't be dereferenced;
+        * however, as each cgroup is given a monotonically increasing
+        * unique serial number and always appended to the sibling list,
+        * the next one can be found by walking the parent's children until
+        * we see a cgroup with higher serial number than @pos's.  While
+        * this path can be slower, it's taken only when either the current
+        * cgroup is removed or iteration and removal race.
         */
-       if (likely(!cgroup_is_dead(pos))) {
+       if (!pos) {
+               next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
+       } else if (likely(!cgroup_is_dead(pos))) {
                next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
-               if (&next->sibling != &pos->parent->children)
-                       return next;
-               return NULL;
+       } else {
+               list_for_each_entry_rcu(next, &cgrp->children, sibling)
+                       if (next->serial_nr > pos->serial_nr)
+                               break;
        }
 
-       /*
-        * Can't dereference the next pointer.  Each cgroup is given a
-        * monotonically increasing unique serial number and always
-        * appended to the sibling list, so the next one can be found by
-        * walking the parent's children until we see a cgroup with higher
-        * serial number than @pos's.
-        *
-        * While this path can be slow, it's taken only when either the
-        * current cgroup is removed or iteration and removal race.
-        */
-       list_for_each_entry_rcu(next, &pos->parent->children, sibling)
-               if (next->serial_nr > pos->serial_nr)
-                       return next;
-       return NULL;
+       if (&next->sibling == &cgrp->children)
+               return NULL;
+
+       if (parent_css->ss)
+               return cgroup_css(next, parent_css->ss->subsys_id);
+       else
+               return &next->dummy_css;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_sibling);
+EXPORT_SYMBOL_GPL(css_next_child);
 
 /**
- * cgroup_next_descendant_pre - find the next descendant for pre-order walk
+ * css_next_descendant_pre - find the next descendant for pre-order walk
  * @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
  *
- * To be used by cgroup_for_each_descendant_pre().  Find the next
- * descendant to visit for pre-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_pre().  Find the next descendant
+ * to visit for pre-order traversal of @root's descendants.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
  * function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * and @root are accessible and @pos is a descendant of @root.
  */
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
-                                         struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+                       struct cgroup_subsys_state *root)
 {
-       struct cgroup *next;
+       struct cgroup_subsys_state *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* if first iteration, pretend we just visited @cgroup */
+       /* if first iteration, pretend we just visited @root */
        if (!pos)
-               pos = cgroup;
+               pos = root;
 
        /* visit the first child if exists */
-       next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
+       next = css_next_child(NULL, pos);
        if (next)
                return next;
 
        /* no child, visit my or the closest ancestor's next sibling */
-       while (pos != cgroup) {
-               next = cgroup_next_sibling(pos);
+       while (pos != root) {
+               next = css_next_child(pos, css_parent(pos));
                if (next)
                        return next;
-               pos = pos->parent;
+               pos = css_parent(pos);
        }
 
        return NULL;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
+EXPORT_SYMBOL_GPL(css_next_descendant_pre);
 
 /**
- * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
- * @pos: cgroup of interest
+ * css_rightmost_descendant - return the rightmost descendant of a css
+ * @pos: css of interest
  *
- * Return the rightmost descendant of @pos.  If there's no descendant,
- * @pos is returned.  This can be used during pre-order traversal to skip
+ * Return the rightmost descendant of @pos.  If there's no descendant, @pos
+ * is returned.  This can be used during pre-order traversal to skip
  * subtree of @pos.
  *
  * While this function requires RCU read locking, it doesn't require the
@@ -3135,9 +3153,10 @@ EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
  * function will return the correct rightmost descendant as long as @pos is
  * accessible.
  */
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos)
 {
-       struct cgroup *last, *tmp;
+       struct cgroup_subsys_state *last, *tmp;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
@@ -3145,63 +3164,64 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
                last = pos;
                /* ->prev isn't RCU safe, walk ->next till the end */
                pos = NULL;
-               list_for_each_entry_rcu(tmp, &last->children, sibling)
+               css_for_each_child(tmp, last)
                        pos = tmp;
        } while (pos);
 
        return last;
 }
-EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
+EXPORT_SYMBOL_GPL(css_rightmost_descendant);
 
-static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
+static struct cgroup_subsys_state *
+css_leftmost_descendant(struct cgroup_subsys_state *pos)
 {
-       struct cgroup *last;
+       struct cgroup_subsys_state *last;
 
        do {
                last = pos;
-               pos = list_first_or_null_rcu(&pos->children, struct cgroup,
-                                            sibling);
+               pos = css_next_child(NULL, pos);
        } while (pos);
 
        return last;
 }
 
 /**
- * cgroup_next_descendant_post - find the next descendant for post-order walk
+ * css_next_descendant_post - find the next descendant for post-order walk
  * @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
  *
- * To be used by cgroup_for_each_descendant_post().  Find the next
- * descendant to visit for post-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_post().  Find the next descendant
+ * to visit for post-order traversal of @root's descendants.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
  * function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * and @root are accessible and @pos is a descendant of @root.
  */
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
-                                          struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+                        struct cgroup_subsys_state *root)
 {
-       struct cgroup *next;
+       struct cgroup_subsys_state *next;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
        /* if first iteration, visit the leftmost descendant */
        if (!pos) {
-               next = cgroup_leftmost_descendant(cgroup);
-               return next != cgroup ? next : NULL;
+               next = css_leftmost_descendant(root);
+               return next != root ? next : NULL;
        }
 
        /* if there's an unvisited sibling, visit its leftmost descendant */
-       next = cgroup_next_sibling(pos);
+       next = css_next_child(pos, css_parent(pos));
        if (next)
-               return cgroup_leftmost_descendant(next);
+               return css_leftmost_descendant(next);
 
        /* no sibling left, visit parent */
-       next = pos->parent;
-       return next != cgroup ? next : NULL;
+       next = css_parent(pos);
+       return next != root ? next : NULL;
 }
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
+EXPORT_SYMBOL_GPL(css_next_descendant_post);
 
 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
        __acquires(css_set_lock)
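To round out the converted walkers, a minimal usage sketch (root_css, my_state() and the limit/usage fields are hypothetical, and locking for the controller's own fields is elided; note that in this version of the API @root itself is not visited by either traversal):

struct cgroup_subsys_state *pos;

/* pre-order: a parent is visited before its children, e.g. to push config down */
rcu_read_lock();
css_for_each_descendant_pre(pos, root_css)
	my_state(pos)->limit = min(my_state(pos)->limit,
				   my_state(css_parent(pos))->limit);
rcu_read_unlock();

/* post-order: children are visited before their parent, e.g. to sum stats up */
rcu_read_lock();
css_for_each_descendant_post(pos, root_css)
	my_state(css_parent(pos))->usage += my_state(pos)->usage;
rcu_read_unlock();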
@@ -3859,21 +3879,20 @@ static int cgroup_procs_open(struct inode *unused, struct file *file)
        return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
 }
 
-static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
-                                           struct cftype *cft)
+static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
+                                        struct cftype *cft)
 {
-       return notify_on_release(cgrp);
+       return notify_on_release(css->cgroup);
 }
 
-static int cgroup_write_notify_on_release(struct cgroup *cgrp,
-                                         struct cftype *cft,
-                                         u64 val)
+static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
+                                         struct cftype *cft, u64 val)
 {
-       clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+       clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
        if (val)
-               set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+               set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        else
-               clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+               clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
        return 0;
 }
 
@@ -3971,9 +3990,10 @@ static void cgroup_event_ptable_queue_proc(struct file *file,
  * Input must be in format '<event_fd> <control_fd> <args>'.
  * Interpretation of args is defined by control file implementation.
  */
-static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
-                                     const char *buffer)
+static int cgroup_write_event_control(struct cgroup_subsys_state *css,
+                                     struct cftype *cft, const char *buffer)
 {
+       struct cgroup *cgrp = css->cgroup;
        struct cgroup_event *event;
        struct cgroup *cgrp_cfile;
        unsigned int efd, cfd;
@@ -4081,20 +4101,19 @@ out_kfree:
        return ret;
 }
 
-static u64 cgroup_clone_children_read(struct cgroup *cgrp,
-                                   struct cftype *cft)
+static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
+                                     struct cftype *cft)
 {
-       return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+       return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
 }
 
-static int cgroup_clone_children_write(struct cgroup *cgrp,
-                                    struct cftype *cft,
-                                    u64 val)
+static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
+                                      struct cftype *cft, u64 val)
 {
        if (val)
-               set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+               set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        else
-               clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+               clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
        return 0;
 }
 
@@ -4545,9 +4564,9 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
        /*
         * Mark @cgrp dead.  This prevents further task migration and child
         * creation by disabling cgroup_lock_live_group().  Note that
-        * CGRP_DEAD assertion is depended upon by cgroup_next_sibling() to
+        * CGRP_DEAD assertion is depended upon by css_next_child() to
         * resume iteration after dropping RCU read lock.  See
-        * cgroup_next_sibling() for details.
+        * css_next_child() for details.
         */
        set_bit(CGRP_DEAD, &cgrp->flags);
 
@@ -5584,17 +5603,19 @@ static void debug_css_free(struct cgroup_subsys_state *css)
        kfree(css);
 }
 
-static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
+                               struct cftype *cft)
 {
-       return cgroup_task_count(cgrp);
+       return cgroup_task_count(css->cgroup);
 }
 
-static u64 current_css_set_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 current_css_set_read(struct cgroup_subsys_state *css,
+                               struct cftype *cft)
 {
        return (u64)(unsigned long)current->cgroups;
 }
 
-static u64 current_css_set_refcount_read(struct cgroup *cgrp,
+static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
 {
        u64 count;
@@ -5605,7 +5626,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cgrp,
        return count;
 }
 
-static int current_css_set_cg_links_read(struct cgroup *cgrp,
+static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft,
                                         struct seq_file *seq)
 {
@@ -5632,14 +5653,13 @@ static int current_css_set_cg_links_read(struct cgroup *cgrp,
 }
 
 #define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct cgroup *cgrp,
-                                struct cftype *cft,
-                                struct seq_file *seq)
+static int cgroup_css_links_read(struct cgroup_subsys_state *css,
+                                struct cftype *cft, struct seq_file *seq)
 {
        struct cgrp_cset_link *link;
 
        read_lock(&css_set_lock);
-       list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+       list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
                int count = 0;
@@ -5658,9 +5678,9 @@ static int cgroup_css_links_read(struct cgroup *cgrp,
        return 0;
 }
 
-static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+       return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
 }
 
 static struct cftype debug_files[] =  {