cgroup: remove now unused css_depth()
include/linux/cgroup.h
1 #ifndef _LINUX_CGROUP_H
2 #define _LINUX_CGROUP_H
3 /*
4 * cgroup interface
5 *
6 * Copyright (C) 2003 BULL SA
7 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
8 *
9 */
10
11 #include <linux/sched.h>
12 #include <linux/cpumask.h>
13 #include <linux/nodemask.h>
14 #include <linux/rcupdate.h>
15 #include <linux/rculist.h>
16 #include <linux/cgroupstats.h>
17 #include <linux/prio_heap.h>
18 #include <linux/rwsem.h>
19 #include <linux/idr.h>
20 #include <linux/workqueue.h>
21 #include <linux/xattr.h>
22 #include <linux/fs.h>
23
24 #ifdef CONFIG_CGROUPS
25
26 struct cgroupfs_root;
27 struct cgroup_subsys;
28 struct inode;
29 struct cgroup;
30 struct css_id;
31 struct eventfd_ctx;
32
33 extern int cgroup_init_early(void);
34 extern int cgroup_init(void);
35 extern void cgroup_fork(struct task_struct *p);
36 extern void cgroup_post_fork(struct task_struct *p);
37 extern void cgroup_exit(struct task_struct *p, int run_callbacks);
38 extern int cgroupstats_build(struct cgroupstats *stats,
39 struct dentry *dentry);
40 extern int cgroup_load_subsys(struct cgroup_subsys *ss);
41 extern void cgroup_unload_subsys(struct cgroup_subsys *ss);
42
43 extern int proc_cgroup_show(struct seq_file *, void *);
44
45 /*
46 * Define the enumeration of all cgroup subsystems.
47 *
48 * We define ids for builtin subsystems and then modular ones.
49 */
50 #define SUBSYS(_x) _x ## _subsys_id,
51 enum cgroup_subsys_id {
52 #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
53 #include <linux/cgroup_subsys.h>
54 #undef IS_SUBSYS_ENABLED
55 CGROUP_BUILTIN_SUBSYS_COUNT,
56
57 __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,
58
59 #define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
60 #include <linux/cgroup_subsys.h>
61 #undef IS_SUBSYS_ENABLED
62 CGROUP_SUBSYS_COUNT,
63 };
64 #undef SUBSYS
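/*
 * Illustrative expansion only (the modular controller name is hypothetical):
 * with, say, cpuset built in and one modular controller enabled, the double
 * inclusion of cgroup_subsys.h above would yield roughly
 *
 *	enum cgroup_subsys_id {
 *		cpuset_subsys_id,
 *		CGROUP_BUILTIN_SUBSYS_COUNT,
 *		__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,
 *		foo_subsys_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 *
 * i.e. builtin subsystems take the low ids, the placeholder rewinds the
 * counter, and the first modular id equals CGROUP_BUILTIN_SUBSYS_COUNT.
 */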
65
66 /* Per-subsystem/per-cgroup state maintained by the system. */
67 struct cgroup_subsys_state {
68 /*
69 * The cgroup that this subsystem is attached to. Useful
70 * for subsystems that want to know about the cgroup
71 * hierarchy structure
72 */
73 struct cgroup *cgroup;
74
75 /*
76 * State maintained by the cgroup system to allow subsystems
77 * to be "busy". Should be accessed via css_get(),
78 * css_tryget() and css_put().
79 */
80
81 atomic_t refcnt;
82
83 unsigned long flags;
84 /* ID for this css, if possible */
85 struct css_id __rcu *id;
86
87 /* Used to put @cgroup->dentry on the last css_put() */
88 struct work_struct dput_work;
89 };
90
91 /* bits in struct cgroup_subsys_state flags field */
92 enum {
93 CSS_ROOT = (1 << 0), /* this CSS is the root of the subsystem */
94 CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
95 };
96
97 /* Caller must verify that the css is not for root cgroup */
98 static inline void __css_get(struct cgroup_subsys_state *css, int count)
99 {
100 atomic_add(count, &css->refcnt);
101 }
102
103 /*
104 * Call css_get() to hold a reference on the css; it can be used
105 * for a reference obtained via:
106 * - an existing ref-counted reference to the css
107 * - task->cgroups for a locked task
108 */
109
110 static inline void css_get(struct cgroup_subsys_state *css)
111 {
112 /* We don't need to reference count the root state */
113 if (!(css->flags & CSS_ROOT))
114 __css_get(css, 1);
115 }
116
117 /*
118 * Call css_tryget() to take a reference on a css if your existing
119 * (known-valid) reference isn't already ref-counted. Returns false if
120 * the css has been destroyed.
121 */
122
123 extern bool __css_tryget(struct cgroup_subsys_state *css);
124 static inline bool css_tryget(struct cgroup_subsys_state *css)
125 {
126 if (css->flags & CSS_ROOT)
127 return true;
128 return __css_tryget(css);
129 }
130
131 /*
132 * css_put() should be called to release a reference taken by
133 * css_get() or css_tryget()
134 */
135
136 extern void __css_put(struct cgroup_subsys_state *css);
137 static inline void css_put(struct cgroup_subsys_state *css)
138 {
139 if (!(css->flags & CSS_ROOT))
140 __css_put(css);
141 }
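/*
 * A minimal usage sketch (the helper below is hypothetical): pin a css across
 * a blocking operation when the only reference held is a known-valid but
 * unaccounted one, e.g. one found via task->cgroups under rcu_read_lock().
 * css_tryget() fails once the css has started being destroyed.
 *
 *	static int foo_pin_and_work(struct cgroup_subsys_state *css)
 *	{
 *		if (!css_tryget(css))
 *			return -ENOENT;
 *		do_blocking_work(css);
 *		css_put(css);
 *		return 0;
 *	}
 *
 * css_get() is the right call instead when the caller already owns a
 * ref-counted reference that it is duplicating.
 */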
142
143 /* bits in struct cgroup flags field */
144 enum {
145 /* Control Group is dead */
146 CGRP_REMOVED,
147 /*
148 * Control Group has previously had a child cgroup or a task,
149 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
150 */
151 CGRP_RELEASABLE,
152 /* Control Group requires release notifications to userspace */
153 CGRP_NOTIFY_ON_RELEASE,
154 /*
155 * Clone the parent's configuration when creating a new child
156 * cpuset cgroup. For historical reasons, this option can be
157 * specified at mount time and thus is implemented here.
158 */
159 CGRP_CPUSET_CLONE_CHILDREN,
160 /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
161 CGRP_SANE_BEHAVIOR,
162 };
163
164 struct cgroup_name {
165 struct rcu_head rcu_head;
166 char name[];
167 };
168
169 struct cgroup {
170 unsigned long flags; /* "unsigned long" so bitops work */
171
172 /*
173 * count users of this cgroup. >0 means busy, but doesn't
174 * necessarily indicate the number of tasks in the cgroup
175 */
176 atomic_t count;
177
178 int id; /* ida allocated in-hierarchy ID */
179
180 /*
181 * We link our 'sibling' struct into our parent's 'children'.
182 * Our children link their 'sibling' into our 'children'.
183 */
184 struct list_head sibling; /* my parent's children */
185 struct list_head children; /* my children */
186 struct list_head files; /* my files */
187
188 struct cgroup *parent; /* my parent */
189 struct dentry *dentry; /* cgroup fs entry, RCU protected */
190
191 /*
192 * Monotonically increasing unique serial number which defines a
193 * uniform order among all cgroups. It's guaranteed that all
194 * ->children lists are in the ascending order of ->serial_nr.
195 * It's used to allow interrupting and resuming iterations.
196 */
197 u64 serial_nr;
198
199 /*
200 * This is a copy of dentry->d_name, and it's needed because
201 * we can't use dentry->d_name in cgroup_path().
202 *
203 * You must acquire rcu_read_lock() to access cgrp->name, and
204 * the only place that can change it is rename(), which is
205 * protected by parent dir's i_mutex.
206 *
207 * Normally you should use cgroup_name() wrapper rather than
208 * access it directly.
209 */
210 struct cgroup_name __rcu *name;
211
212 /* Private pointers for each registered subsystem */
213 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
214
215 struct cgroupfs_root *root;
216
217 /*
218 * List of cg_cgroup_links pointing at css_sets with
219 * tasks in this cgroup. Protected by css_set_lock
220 */
221 struct list_head css_sets;
222
223 struct list_head allcg_node; /* cgroupfs_root->allcg_list */
224 struct list_head cft_q_node; /* used during cftype add/rm */
225
226 /*
227 * Linked list running through all cgroups that can
228 * potentially be reaped by the release agent. Protected by
229 * release_list_lock
230 */
231 struct list_head release_list;
232
233 /*
234 * list of pidlists, up to two for each namespace (one for procs, one
235 * for tasks); created on demand.
236 */
237 struct list_head pidlists;
238 struct mutex pidlist_mutex;
239
240 /* For RCU-protected deletion */
241 struct rcu_head rcu_head;
242 struct work_struct free_work;
243
244 /* List of events which userspace want to receive */
245 struct list_head event_list;
246 spinlock_t event_list_lock;
247
248 /* directory xattrs */
249 struct simple_xattrs xattrs;
250 };
251
252 #define MAX_CGROUP_ROOT_NAMELEN 64
253
254 /* cgroupfs_root->flags */
255 enum {
256 /*
257 * Unfortunately, cgroup core and various controllers are riddled
258 * with idiosyncrasies and pointless options. The following flag,
259 * when set, will force sane behavior - some options are forced on,
260 * others are disallowed, and some controllers will change their
261 * hierarchical or other behaviors.
262 *
263 * The set of behaviors affected by this flag are still being
264 * determined and developed and the mount option for this flag is
265 * prefixed with __DEVEL__. The prefix will be dropped once we
266 * reach the point where all behaviors are compatible with the
267 * planned unified hierarchy, which will automatically turn on this
268 * flag.
269 *
270 * The following are the behaviors currently affected by this flag.
271 *
272 * - Mount options "noprefix" and "clone_children" are disallowed.
273 * Also, cgroupfs file cgroup.clone_children is not created.
274 *
275 * - When mounting an existing superblock, mount options should
276 * match.
277 *
278 * - Remount is disallowed.
279 *
280 * - memcg: use_hierarchy is on by default and the cgroup file for
281 * the flag is not created.
282 *
283 * The following are planned changes.
284 *
285 * - release_agent will be disallowed once a replacement notification
286 * mechanism is implemented.
287 */
288 CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),
289
290 CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
291 CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */
292 };
293
294 /*
295 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
296 * associated with a superblock to form an active hierarchy. This is
297 * internal to cgroup core. Don't access directly from controllers.
298 */
299 struct cgroupfs_root {
300 struct super_block *sb;
301
302 /*
303 * The bitmask of subsystems intended to be attached to this
304 * hierarchy
305 */
306 unsigned long subsys_mask;
307
308 /* Unique id for this hierarchy. */
309 int hierarchy_id;
310
311 /* The bitmask of subsystems currently attached to this hierarchy */
312 unsigned long actual_subsys_mask;
313
314 /* A list running through the attached subsystems */
315 struct list_head subsys_list;
316
317 /* The root cgroup for this hierarchy */
318 struct cgroup top_cgroup;
319
320 /* Tracks how many cgroups are currently defined in hierarchy. */
321 int number_of_cgroups;
322
323 /* A list running through the active hierarchies */
324 struct list_head root_list;
325
326 /* All cgroups on this root, cgroup_mutex protected */
327 struct list_head allcg_list;
328
329 /* Hierarchy-specific flags */
330 unsigned long flags;
331
332 /* IDs for cgroups in this hierarchy */
333 struct ida cgroup_ida;
334
335 /* The path to use for release notifications. */
336 char release_agent_path[PATH_MAX];
337
338 /* The name for this hierarchy - may be empty */
339 char name[MAX_CGROUP_ROOT_NAMELEN];
340 };
341
342 /*
343 * A css_set is a structure holding pointers to a set of
344 * cgroup_subsys_state objects. This saves space in the task struct
345 * object and speeds up fork()/exit(), since a single inc/dec and a
346 * list_add()/del() can bump the reference count on the entire cgroup
347 * set for a task.
348 */
349
350 struct css_set {
351
352 /* Reference count */
353 atomic_t refcount;
354
355 /*
356 * List running through all cgroup groups in the same hash
357 * slot. Protected by css_set_lock
358 */
359 struct hlist_node hlist;
360
361 /*
362 * List running through all tasks using this cgroup
363 * group. Protected by css_set_lock
364 */
365 struct list_head tasks;
366
367 /*
368 * List of cg_cgroup_link objects on link chains from
369 * cgroups referenced from this css_set. Protected by
370 * css_set_lock
371 */
372 struct list_head cg_links;
373
374 /*
375 * Set of subsystem states, one for each subsystem. This array
376 * is immutable after creation apart from the init_css_set
377 * during subsystem registration (at boot time) and modular subsystem
378 * loading/unloading.
379 */
380 struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
381
382 /* For RCU-protected deletion */
383 struct rcu_head rcu_head;
384 };
385
386 /*
387 * cgroup_map_cb is an abstract callback API for reporting map-valued
388 * control files
389 */
390
391 struct cgroup_map_cb {
392 int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
393 void *state;
394 };
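/*
 * A hedged example of a ->read_map() handler (the controller, its state
 * structure and foo_from_cgroup() are hypothetical).  Each cb->fill() call
 * emits one key/value pair; a non-zero return aborts the read.
 *
 *	static int foo_read_map(struct cgroup *cont, struct cftype *cft,
 *				struct cgroup_map_cb *cb)
 *	{
 *		struct foo_state *fs = foo_from_cgroup(cont);
 *		int ret;
 *
 *		ret = cb->fill(cb, "total", fs->total);
 *		if (ret)
 *			return ret;
 *		return cb->fill(cb, "failcnt", fs->failcnt);
 *	}
 */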
395
396 /*
397 * struct cftype: handler definitions for cgroup control files
398 *
399 * When reading/writing to a file:
400 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
401 * - the 'cftype' of the file is file->f_dentry->d_fsdata
402 */
403
404 /* cftype->flags */
405 #define CFTYPE_ONLY_ON_ROOT (1U << 0) /* only create on root cg */
406 #define CFTYPE_NOT_ON_ROOT (1U << 1) /* don't create on root cg */
407 #define CFTYPE_INSANE (1U << 2) /* don't create if sane_behavior */
408
409 #define MAX_CFTYPE_NAME 64
410
411 struct cftype {
412 /*
413 * By convention, the name should begin with the name of the
414 * subsystem, followed by a period. Zero length string indicates
415 * end of cftype array.
416 */
417 char name[MAX_CFTYPE_NAME];
418 int private;
419 /*
420 * If not 0, file mode is set to this value, otherwise it will
421 * be figured out automatically
422 */
423 umode_t mode;
424
425 /*
426 * If non-zero, defines the maximum length of string that can
427 * be passed to write_string; defaults to 64
428 */
429 size_t max_write_len;
430
431 /* CFTYPE_* flags */
432 unsigned int flags;
433
434 int (*open)(struct inode *inode, struct file *file);
435 ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
436 struct file *file,
437 char __user *buf, size_t nbytes, loff_t *ppos);
438 /*
439 * read_u64() is a shortcut for the common case of returning a
440 * single integer. Use it in place of read()
441 */
442 u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
443 /*
444 * read_s64() is a signed version of read_u64()
445 */
446 s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
447 /*
448 * read_map() is used for defining a map of key/value
449 * pairs. It should call cb->fill(cb, key, value) for each
450 * entry. The key/value pairs (and their ordering) should not
451 * change between reboots.
452 */
453 int (*read_map)(struct cgroup *cont, struct cftype *cft,
454 struct cgroup_map_cb *cb);
455 /*
456 * read_seq_string() is used for outputting a simple sequence
457 * using seqfile.
458 */
459 int (*read_seq_string)(struct cgroup *cont, struct cftype *cft,
460 struct seq_file *m);
461
462 ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
463 struct file *file,
464 const char __user *buf, size_t nbytes, loff_t *ppos);
465
466 /*
467 * write_u64() is a shortcut for the common case of accepting
468 * a single integer (as parsed by simple_strtoull) from
469 * userspace. Use in place of write(); return 0 or error.
470 */
471 int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
472 /*
473 * write_s64() is a signed version of write_u64()
474 */
475 int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
476
477 /*
478 * write_string() is passed a nul-terminated kernelspace
479 * buffer of maximum length determined by max_write_len.
480 * Returns 0 or -ve error code.
481 */
482 int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
483 const char *buffer);
484 /*
485 * trigger() callback can be used to get some kick from the
486 * userspace, when the actual string written is not important
487 * at all. The private field can be used to determine the
488 * kick type for multiplexing.
489 */
490 int (*trigger)(struct cgroup *cgrp, unsigned int event);
491
492 int (*release)(struct inode *inode, struct file *file);
493
494 /*
495 * register_event() callback will be used to add new userspace
496 * waiter for changes related to the cftype. Implement it if
497 * you want to provide this functionality. Use eventfd_signal()
498 * on eventfd to send notification to userspace.
499 */
500 int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
501 struct eventfd_ctx *eventfd, const char *args);
502 /*
503 * unregister_event() callback will be called when userspace
504 * closes the eventfd or on cgroup removing.
505 * This callback must be implemented if you want to provide
506 * notification functionality.
507 */
508 void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
509 struct eventfd_ctx *eventfd);
510 };
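/*
 * A sketch of a cftype array for a hypothetical "foo" controller.  Note the
 * subsystem-name prefix, the zero-length sentinel entry terminating the
 * array, and that read_u64/write_u64 cover the common single-integer case.
 *
 *	static u64 foo_limit_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return foo_from_cgroup(cgrp)->limit;
 *	}
 *
 *	static int foo_limit_write(struct cgroup *cgrp, struct cftype *cft,
 *				   u64 val)
 *	{
 *		foo_from_cgroup(cgrp)->limit = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "foo.limit",
 *			.read_u64 = foo_limit_read,
 *			.write_u64 = foo_limit_write,
 *		},
 *		{ }	(sentinel)
 *	};
 *
 * Such an array is typically pointed at by cgroup_subsys->base_cftypes or
 * registered later with cgroup_add_cftypes().
 */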
511
512 /*
513 * cftype_sets describe cftypes belonging to a subsystem and are chained at
514 * cgroup_subsys->cftsets. Each cftset points to an array of cftypes
515 * terminated by zero length name.
516 */
517 struct cftype_set {
518 struct list_head node; /* chained at subsys->cftsets */
519 struct cftype *cfts;
520 };
521
522 struct cgroup_scanner {
523 struct cgroup *cg;
524 int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
525 void (*process_task)(struct task_struct *p,
526 struct cgroup_scanner *scan);
527 struct ptr_heap *heap;
528 void *data;
529 };
530
531 /*
532 * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
533 * function can be called as long as @cgrp is accessible.
534 */
535 static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
536 {
537 return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
538 }
539
540 /* Caller should hold rcu_read_lock() */
541 static inline const char *cgroup_name(const struct cgroup *cgrp)
542 {
543 return rcu_dereference(cgrp->name)->name;
544 }
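/*
 * For example (hedged sketch), printing a cgroup's name safely:
 *
 *	rcu_read_lock();
 *	pr_debug("cgroup %s\n", cgroup_name(cgrp));
 *	rcu_read_unlock();
 */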
545
546 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
547 int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
548
549 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
550
551 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
552 int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
553 char *buf, size_t buflen);
554
555 int cgroup_task_count(const struct cgroup *cgrp);
556
557 /*
558 * Control Group taskset, used to pass around set of tasks to cgroup_subsys
559 * methods.
560 */
561 struct cgroup_taskset;
562 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
563 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
564 struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
565 int cgroup_taskset_size(struct cgroup_taskset *tset);
566
567 /**
568 * cgroup_taskset_for_each - iterate cgroup_taskset
569 * @task: the loop cursor
570 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
571 * @tset: taskset to iterate
572 */
573 #define cgroup_taskset_for_each(task, skip_cgrp, tset) \
574 for ((task) = cgroup_taskset_first((tset)); (task); \
575 (task) = cgroup_taskset_next((tset))) \
576 if (!(skip_cgrp) || \
577 cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
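/*
 * A minimal sketch of iterating a taskset from ->can_attach() (the controller
 * and its policy check are hypothetical).  Passing @cgrp as @skip_cgrp skips
 * tasks that are already in the destination cgroup.
 *
 *	static int foo_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, cgrp, tset) {
 *			if (!foo_task_allowed(task))
 *				return -EPERM;
 *		}
 *		return 0;
 *	}
 */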
578
579 /*
580 * Control Group subsystem type.
581 * See Documentation/cgroups/cgroups.txt for details
582 */
583
584 struct cgroup_subsys {
585 struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
586 int (*css_online)(struct cgroup *cgrp);
587 void (*css_offline)(struct cgroup *cgrp);
588 void (*css_free)(struct cgroup *cgrp);
589
590 int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
591 void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
592 void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
593 void (*fork)(struct task_struct *task);
594 void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
595 struct task_struct *task);
596 void (*bind)(struct cgroup *root);
597
598 int subsys_id;
599 int disabled;
600 int early_init;
601 /*
602 * True if this subsys uses ID. ID is not available before cgroup_init()
603 * (i.e. not available at early_init time).
604 */
605 bool use_id;
606
607 /*
608 * If %false, this subsystem is properly hierarchical -
609 * configuration, resource accounting and restriction on a parent
610 * cgroup cover those of its children. If %true, hierarchy support
611 * is broken in some ways - some subsystems ignore hierarchy
612 * completely while others are only implemented half-way.
613 *
614 * It's now disallowed to create nested cgroups if the subsystem is
615 * broken and cgroup core will emit a warning message in such
616 * cases. Eventually, all subsystems will be made properly
617 * hierarchical and this will go away.
618 */
619 bool broken_hierarchy;
620 bool warned_broken_hierarchy;
621
622 #define MAX_CGROUP_TYPE_NAMELEN 32
623 const char *name;
624
625 /*
626 * Link to parent, and list entry in parent's children.
627 * Protected by cgroup_lock()
628 */
629 struct cgroupfs_root *root;
630 struct list_head sibling;
631 /* used when use_id == true */
632 struct idr idr;
633 spinlock_t id_lock;
634
635 /* list of cftype_sets */
636 struct list_head cftsets;
637
638 /* base cftypes, automatically [de]registered with subsys itself */
639 struct cftype *base_cftypes;
640 struct cftype_set base_cftset;
641
642 /* should be defined only by modular subsystems */
643 struct module *module;
644 };
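/*
 * A skeleton of a hypothetical controller filling in the ops above.  Builtin
 * subsystems get their _subsys_id and extern declaration from cgroup_subsys.h;
 * modular ones additionally set .module and go through cgroup_load_subsys() /
 * cgroup_unload_subsys().
 *
 *	static struct cgroup_subsys_state *foo_css_alloc(struct cgroup *cgrp)
 *	{
 *		struct foo_state *fs = kzalloc(sizeof(*fs), GFP_KERNEL);
 *
 *		if (!fs)
 *			return ERR_PTR(-ENOMEM);
 *		return &fs->css;
 *	}
 *
 *	static void foo_css_free(struct cgroup *cgrp)
 *	{
 *		kfree(foo_from_cgroup(cgrp));
 *	}
 *
 *	struct cgroup_subsys foo_subsys = {
 *		.name		= "foo",
 *		.subsys_id	= foo_subsys_id,
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.base_cftypes	= foo_files,
 *	};
 */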
645
646 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
647 #define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
648 #include <linux/cgroup_subsys.h>
649 #undef IS_SUBSYS_ENABLED
650 #undef SUBSYS
651
652 static inline struct cgroup_subsys_state *cgroup_subsys_state(
653 struct cgroup *cgrp, int subsys_id)
654 {
655 return cgrp->subsys[subsys_id];
656 }
657
658 /*
659 * function to get the cgroup_subsys_state which allows for extra
660 * rcu_dereference_check() conditions, such as locks used during the
661 * cgroup_subsys::attach() methods.
662 */
663 #ifdef CONFIG_PROVE_RCU
664 extern struct mutex cgroup_mutex;
665 #define task_subsys_state_check(task, subsys_id, __c) \
666 rcu_dereference_check((task)->cgroups->subsys[(subsys_id)], \
667 lockdep_is_held(&(task)->alloc_lock) || \
668 lockdep_is_held(&cgroup_mutex) || (__c))
669 #else
670 #define task_subsys_state_check(task, subsys_id, __c) \
671 rcu_dereference((task)->cgroups->subsys[(subsys_id)])
672 #endif
673
674 static inline struct cgroup_subsys_state *
675 task_subsys_state(struct task_struct *task, int subsys_id)
676 {
677 return task_subsys_state_check(task, subsys_id, false);
678 }
679
680 static inline struct cgroup* task_cgroup(struct task_struct *task,
681 int subsys_id)
682 {
683 return task_subsys_state(task, subsys_id)->cgroup;
684 }
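/*
 * A usage sketch (subsystem and container names are hypothetical): the
 * returned pointer is only stable while rcu_read_lock() is held, or under one
 * of the conditions accepted by task_subsys_state_check(); take a css
 * reference to use it beyond that.
 *
 *	rcu_read_lock();
 *	css = task_subsys_state(task, foo_subsys_id);
 *	fs = container_of(css, struct foo_state, css);
 *	... read fs, or css_tryget(css) to keep it past the RCU section ...
 *	rcu_read_unlock();
 */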
685
686 struct cgroup *cgroup_next_sibling(struct cgroup *pos);
687
688 /**
689 * cgroup_for_each_child - iterate through children of a cgroup
690 * @pos: the cgroup * to use as the loop cursor
691 * @cgrp: cgroup whose children to walk
692 *
693 * Walk @cgrp's children. Must be called under rcu_read_lock(). A child
694 * cgroup which hasn't finished ->css_online() or already has finished
695 * ->css_offline() may show up during traversal and it's each subsystem's
696 * responsibility to verify that each @pos is alive.
697 *
698 * If a subsystem synchronizes against the parent in its ->css_online() and
699 * before starting to iterate, a cgroup which finished ->css_online() is
700 * guaranteed to be visible in future iterations.
701 *
702 * It is allowed to temporarily drop RCU read lock during iteration. The
703 * caller is responsible for ensuring that @pos remains accessible until
704 * the start of the next iteration by, for example, bumping the css refcnt.
705 */
706 #define cgroup_for_each_child(pos, cgrp) \
707 for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \
708 struct cgroup, sibling); \
709 (pos); (pos) = cgroup_next_sibling((pos)))
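/*
 * Example walk (controller-specific names are hypothetical): each child may
 * be half-created or half-destroyed, so check liveness, e.g. via css_tryget(),
 * before relying on it.
 *
 *	rcu_read_lock();
 *	cgroup_for_each_child(child, cgrp) {
 *		struct cgroup_subsys_state *css;
 *
 *		css = cgroup_subsys_state(child, foo_subsys_id);
 *		if (!css_tryget(css))
 *			continue;
 *		foo_update_child(child);
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */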
710
711 struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
712 struct cgroup *cgroup);
713 struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
714
715 /**
716 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
717 * @pos: the cgroup * to use as the loop cursor
718 * @cgroup: cgroup whose descendants to walk
719 *
720 * Walk @cgroup's descendants. Must be called under rcu_read_lock(). A
721 * descendant cgroup which hasn't finished ->css_online() or already has
722 * finished ->css_offline() may show up during traversal and it's each
723 * subsystem's responsibility to verify that each @pos is alive.
724 *
725 * If a subsystem synchronizes against the parent in its ->css_online() and
726 * before starting to iterate, and synchronizes against @pos on each
727 * iteration, any descendant cgroup which finished ->css_online() is
728 * guaranteed to be visible in future iterations.
729 *
730 * In other words, the following guarantees that a descendant can't escape
731 * state updates of its ancestors.
732 *
733 * my_online(@cgrp)
734 * {
735 * Lock @cgrp->parent and @cgrp;
736 * Inherit state from @cgrp->parent;
737 * Unlock both.
738 * }
739 *
740 * my_update_state(@cgrp)
741 * {
742 * Lock @cgrp;
743 * Update @cgrp's state;
744 * Unlock @cgrp;
745 *
746 * cgroup_for_each_descendant_pre(@pos, @cgrp) {
747 * Lock @pos;
748 * Verify @pos is alive and inherit state from @pos->parent;
749 * Unlock @pos;
750 * }
751 * }
752 *
753 * As long as the inheriting step, including checking the parent state, is
754 * enclosed inside @pos locking, double-locking the parent isn't necessary
755 * while inheriting. The state update to the parent is guaranteed to be
756 * visible by walking order and, as long as inheriting operations to the
757 * same @pos are atomic to each other, multiple updates racing each other
758 * still result in the correct state. It's guaranteed that at least one
759 * inheritance happens for any cgroup after the latest update to its
760 * parent.
761 *
762 * If checking parent's state requires locking the parent, each inheriting
763 * iteration should lock and unlock both @pos->parent and @pos.
764 *
765 * Alternatively, a subsystem may choose to use a single global lock to
766 * synchronize ->css_online() and ->css_offline() against tree-walking
767 * operations.
768 *
769 * It is allowed to temporarily drop RCU read lock during iteration. The
770 * caller is responsible for ensuring that @pos remains accessible until
771 * the start of the next iteration by, for example, bumping the css refcnt.
772 */
773 #define cgroup_for_each_descendant_pre(pos, cgroup) \
774 for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos); \
775 pos = cgroup_next_descendant_pre((pos), (cgroup)))
776
777 struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
778 struct cgroup *cgroup);
779
780 /**
781 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
782 * @pos: the cgroup * to use as the loop cursor
783 * @cgroup: cgroup whose descendants to walk
784 *
785 * Similar to cgroup_for_each_descendant_pre() but performs post-order
786 * traversal instead. Note that the visibility guarantee described for the
787 * pre-order walk does not apply to post-order walks.
788 */
789 #define cgroup_for_each_descendant_post(pos, cgroup) \
790 for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos); \
791 pos = cgroup_next_descendant_post((pos), (cgroup)))
792
793 /* A cgroup_iter should be treated as an opaque object */
794 struct cgroup_iter {
795 struct list_head *cg_link;
796 struct list_head *task;
797 };
798
799 /*
800 * To iterate across the tasks in a cgroup:
801 *
802 * 1) call cgroup_iter_start to initialize an iterator
803 *
804 * 2) call cgroup_iter_next() to retrieve member tasks until it
805 * returns NULL or until you want to end the iteration
806 *
807 * 3) call cgroup_iter_end() to destroy the iterator.
808 *
809 * Or, call cgroup_scan_tasks() to iterate through every task in a
810 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
811 * the test_task() callback, but not while calling the process_task()
812 * callback.
813 */
814 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
815 struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
816 struct cgroup_iter *it);
817 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
818 int cgroup_scan_tasks(struct cgroup_scanner *scan);
819 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
820 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
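/*
 * A sketch of the iteration pattern described above (the per-task accounting
 * helper is hypothetical).  The iterator is meant for short, non-sleeping
 * per-task work; cgroup_scan_tasks() is the alternative when process_task()
 * may need to do more.
 *
 *	struct cgroup_iter it;
 *	struct task_struct *task;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((task = cgroup_iter_next(cgrp, &it)))
 *		foo_account_task(task);
 *	cgroup_iter_end(cgrp, &it);
 */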
821
822 /*
823 * A CSS ID identifies a cgroup_subsys_state struct under a subsystem. It only
824 * works if cgroup_subsys.use_id == true and can be used for lookup and scanning.
825 * A CSS ID is assigned automatically at cgroup allocation (create) and removed
826 * when the subsys calls free_css_id(), because the lifetime of the
827 * cgroup_subsys_state is the subsys's responsibility.
828 *
829 * Lookup and scanning functions should be called under rcu_read_lock();
830 * taking cgroup_mutex is not necessary for these calls. However, the css
831 * returned can be "not populated yet" or "being destroyed", so the caller
832 * should check the css and cgroup status.
833 */
834
835 /*
836 * Typically called at ->destroy(), or wherever the subsys frees
837 * cgroup_subsys_state.
838 */
839 void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
840
841 /* Find a cgroup_subsys_state which has given ID */
842
843 struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
844
845 /* Returns true if root is ancestor of cg */
846 bool css_is_ancestor(struct cgroup_subsys_state *cg,
847 const struct cgroup_subsys_state *root);
848
849 /* Get id and depth of css */
850 unsigned short css_id(struct cgroup_subsys_state *css);
851 struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
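/*
 * A lookup sketch (assuming a subsystem with use_id == true; foo_subsys is
 * hypothetical): a stored id can later be resolved back to a css under
 * rcu_read_lock(), and the caller must check that the result is still alive.
 *
 *	unsigned short id = css_id(css);
 *	...
 *	rcu_read_lock();
 *	css = css_lookup(&foo_subsys, id);
 *	if (css && css_tryget(css)) {
 *		... use css ...
 *		css_put(css);
 *	}
 *	rcu_read_unlock();
 */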
852
853 #else /* !CONFIG_CGROUPS */
854
855 static inline int cgroup_init_early(void) { return 0; }
856 static inline int cgroup_init(void) { return 0; }
857 static inline void cgroup_fork(struct task_struct *p) {}
858 static inline void cgroup_post_fork(struct task_struct *p) {}
859 static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
860
861 static inline int cgroupstats_build(struct cgroupstats *stats,
862 struct dentry *dentry)
863 {
864 return -EINVAL;
865 }
866
867 /* No cgroups - nothing to do */
868 static inline int cgroup_attach_task_all(struct task_struct *from,
869 struct task_struct *t)
870 {
871 return 0;
872 }
873
874 #endif /* !CONFIG_CGROUPS */
875
876 #endif /* _LINUX_CGROUP_H */