cgroup: Merge branch 'for-3.11-fixes' into for-3.12
include/linux/cgroup.h
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/prio_heap.h>
#include <linux/rwsem.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/xattr.h>
#include <linux/fs.h>
#include <linux/percpu-refcount.h>

#ifdef CONFIG_CGROUPS

struct cgroupfs_root;
struct cgroup_subsys;
struct inode;
struct cgroup;
struct css_id;
struct eventfd_ctx;

extern int cgroup_init_early(void);
extern int cgroup_init(void);
extern void cgroup_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
extern void cgroup_exit(struct task_struct *p, int run_callbacks);
extern int cgroupstats_build(struct cgroupstats *stats,
                             struct dentry *dentry);
extern int cgroup_load_subsys(struct cgroup_subsys *ss);
extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

extern int proc_cgroup_show(struct seq_file *, void *);

/*
 * Define the enumeration of all cgroup subsystems.
 *
 * We define ids for builtin subsystems and then modular ones.
 */
#define SUBSYS(_x) _x ## _subsys_id,
enum cgroup_subsys_id {
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
        CGROUP_BUILTIN_SUBSYS_COUNT,

        __CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,

#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
        CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
        /*
         * The cgroup that this subsystem is attached to. Useful
         * for subsystems that want to know about the cgroup
         * hierarchy structure.
         */
        struct cgroup *cgroup;

        /* reference count - access via css_[try]get() and css_put() */
        struct percpu_ref refcnt;

        unsigned long flags;
        /* ID for this css, if possible */
        struct css_id __rcu *id;

        /* Used to put @cgroup->dentry on the last css_put() */
        struct work_struct dput_work;
};

/* bits in struct cgroup_subsys_state flags field */
enum {
        CSS_ROOT = (1 << 0), /* this CSS is the root of the subsystem */
        CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */
};

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        /* We don't need to reference count the root state */
        if (!(css->flags & CSS_ROOT))
                percpu_ref_get(&css->refcnt);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css if it's alive. The caller naturally needs to
 * ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (css->flags & CSS_ROOT)
                return true;
        return percpu_ref_tryget(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_ROOT))
                percpu_ref_put(&css->refcnt);
}

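/*
 * Example (illustrative sketch, not part of the upstream header): the
 * usual RCU + tryget pattern for pinning a css across a region that may
 * sleep.  task_subsys_state() is declared later in this header;
 * "my_subsys_id" is a stand-in for a real cgroup_subsys_id.
 *
 *      struct cgroup_subsys_state *css;
 *
 *      rcu_read_lock();
 *      css = task_subsys_state(current, my_subsys_id);
 *      if (css && css_tryget(css)) {
 *              rcu_read_unlock();
 *              ... may sleep while the reference is held ...
 *              css_put(css);
 *      } else {
 *              rcu_read_unlock();
 *      }
 */
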
/* bits in struct cgroup flags field */
enum {
        /* Control Group is dead */
        CGRP_DEAD,
        /*
         * Control Group has previously had a child cgroup or a task,
         * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
         */
        CGRP_RELEASABLE,
        /* Control Group requires release notifications to userspace */
        CGRP_NOTIFY_ON_RELEASE,
        /*
         * Clone the parent's configuration when creating a new child
         * cpuset cgroup. For historical reasons, this option can be
         * specified at mount time and thus is implemented here.
         */
        CGRP_CPUSET_CLONE_CHILDREN,
        /* see the comment above CGRP_ROOT_SANE_BEHAVIOR for details */
        CGRP_SANE_BEHAVIOR,
};

struct cgroup_name {
        struct rcu_head rcu_head;
        char name[];
};

struct cgroup {
        unsigned long flags;            /* "unsigned long" so bitops work */

        /*
         * idr allocated in-hierarchy ID.
         *
         * The ID of the root cgroup is always 0, and a new cgroup
         * is assigned the smallest available ID.
         */
        int id;

        /*
         * We link our 'sibling' struct into our parent's 'children'.
         * Our children link their 'sibling' into our 'children'.
         */
        struct list_head sibling;       /* my parent's children */
        struct list_head children;      /* my children */
        struct list_head files;         /* my files */

        struct cgroup *parent;          /* my parent */
        struct dentry *dentry;          /* cgroup fs entry, RCU protected */

        /*
         * Monotonically increasing unique serial number which defines a
         * uniform order among all cgroups. It's guaranteed that all
         * ->children lists are in the ascending order of ->serial_nr.
         * It's used to allow interrupting and resuming iterations.
         */
        u64 serial_nr;

        /*
         * This is a copy of dentry->d_name, and it's needed because
         * we can't use dentry->d_name in cgroup_path().
         *
         * You must acquire rcu_read_lock() to access cgrp->name, and
         * the only place that can change it is rename(), which is
         * protected by the parent dir's i_mutex.
         *
         * Normally you should use the cgroup_name() wrapper rather than
         * access it directly.
         */
        struct cgroup_name __rcu *name;

        /* Private pointers for each registered subsystem */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        struct cgroupfs_root *root;

        /*
         * List of cgrp_cset_links pointing at css_sets with tasks in this
         * cgroup. Protected by css_set_lock.
         */
        struct list_head cset_links;

        /*
         * Linked list running through all cgroups that can
         * potentially be reaped by the release agent. Protected by
         * release_list_lock.
         */
        struct list_head release_list;

        /*
         * List of pidlists, up to two for each namespace (one for procs,
         * one for tasks); created on demand.
         */
        struct list_head pidlists;
        struct mutex pidlist_mutex;

        /* For css percpu_ref killing and RCU-protected deletion */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
        atomic_t css_kill_cnt;

        /* List of events which userspace wants to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

        /* directory xattrs */
        struct simple_xattrs xattrs;
};

#define MAX_CGROUP_ROOT_NAMELEN 64

/* cgroupfs_root->flags */
enum {
        /*
         * Unfortunately, cgroup core and various controllers are riddled
         * with idiosyncrasies and pointless options. The following flag,
         * when set, will force sane behavior - some options are forced on,
         * others are disallowed, and some controllers will change their
         * hierarchical or other behaviors.
         *
         * The set of behaviors affected by this flag is still being
         * determined and developed, and the mount option for this flag is
         * prefixed with __DEVEL__. The prefix will be dropped once we
         * reach the point where all behaviors are compatible with the
         * planned unified hierarchy, which will automatically turn on this
         * flag.
         *
         * The following are the behaviors currently affected by this flag.
         *
         * - Mount options "noprefix" and "clone_children" are disallowed.
         *   Also, the cgroupfs file cgroup.clone_children is not created.
         *
         * - When mounting an existing superblock, mount options should
         *   match.
         *
         * - Remount is disallowed.
         *
         * - rename(2) is disallowed.
         *
         * - "tasks" is removed. Everything should be at process
         *   granularity. Use "cgroup.procs" instead.
         *
         * - "release_agent" and "notify_on_release" are removed.
         *   A replacement notification mechanism will be implemented.
         *
         * - cpuset: tasks will be kept in empty cpusets when hotplug happens
         *   and take masks of ancestors with non-empty cpus/mems, instead of
         *   being moved to an ancestor.
         *
         * - cpuset: a task can be moved into an empty cpuset, and again it
         *   takes masks of ancestors.
         *
         * - memcg: use_hierarchy is on by default and the cgroup file for
         *   the flag is not created.
         *
         * - blkcg: blk-throttle becomes properly hierarchical.
         */
        CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),

        CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */
        CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */

        /* mount options live below bit 16 */
        CGRP_ROOT_OPTION_MASK = (1 << 16) - 1,

        CGRP_ROOT_SUBSYS_BOUND = (1 << 16), /* subsystems finished binding */
};

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
 * associated with a superblock to form an active hierarchy. This is
 * internal to cgroup core. Don't access directly from controllers.
 */
struct cgroupfs_root {
        struct super_block *sb;

        /* The bitmask of subsystems attached to this hierarchy */
        unsigned long subsys_mask;

        /* Unique id for this hierarchy. */
        int hierarchy_id;

        /* A list running through the attached subsystems */
        struct list_head subsys_list;

        /* The root cgroup for this hierarchy */
        struct cgroup top_cgroup;

        /* Tracks how many cgroups are currently defined in hierarchy. */
        int number_of_cgroups;

        /* A list running through the active hierarchies */
        struct list_head root_list;

        /* Hierarchy-specific flags */
        unsigned long flags;

        /* IDs for cgroups in this hierarchy */
        struct idr cgroup_idr;

        /* The path to use for release notifications. */
        char release_agent_path[PATH_MAX];

        /* The name for this hierarchy - may be empty */
        char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
        /* Reference count */
        atomic_t refcount;

        /*
         * List running through all css_sets in the same hash
         * slot. Protected by css_set_lock.
         */
        struct hlist_node hlist;

        /*
         * List running through all tasks using this css_set.
         * Protected by css_set_lock.
         */
        struct list_head tasks;

        /*
         * List of cgrp_cset_links pointing at cgroups referenced from this
         * css_set. Protected by css_set_lock.
         */
        struct list_head cgrp_links;

        /*
         * Set of subsystem states, one for each subsystem. This array
         * is immutable after creation apart from the init_css_set
         * during subsystem registration (at boot time) and modular subsystem
         * loading/unloading.
         */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

        /* For RCU-protected deletion */
        struct rcu_head rcu_head;
};

/*
 * cgroup_map_cb is an abstract callback API for reporting map-valued
 * control files.
 */
struct cgroup_map_cb {
        int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
        void *state;
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
 * - the 'cftype' of the file is file->f_dentry->d_fsdata
 */

/* cftype->flags */
enum {
        CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
        CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
        CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */
};

#define MAX_CFTYPE_NAME 64

struct cftype {
        /*
         * By convention, the name should begin with the name of the
         * subsystem, followed by a period. A zero length string
         * indicates the end of a cftype array.
         */
        char name[MAX_CFTYPE_NAME];
        int private;
        /*
         * If not 0, the file mode is set to this value; otherwise it
         * will be figured out automatically.
         */
        umode_t mode;

        /*
         * If non-zero, defines the maximum length of string that can
         * be passed to write_string; defaults to 64.
         */
        size_t max_write_len;

        /* CFTYPE_* flags */
        unsigned int flags;

        int (*open)(struct inode *inode, struct file *file);
        ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
                        struct file *file,
                        char __user *buf, size_t nbytes, loff_t *ppos);
        /*
         * read_u64() is a shortcut for the common case of returning a
         * single integer. Use it in place of read().
         */
        u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
        /*
         * read_s64() is a signed version of read_u64().
         */
        s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
        /*
         * read_map() is used for defining a map of key/value
         * pairs. It should call cb->fill(cb, key, value) for each
         * entry. The key/value pairs (and their ordering) should not
         * change between reboots.
         */
        int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
                        struct cgroup_map_cb *cb);
        /*
         * read_seq_string() is used for outputting a simple sequence
         * using seqfile.
         */
        int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
                               struct seq_file *m);

        ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
                         struct file *file,
                         const char __user *buf, size_t nbytes, loff_t *ppos);

        /*
         * write_u64() is a shortcut for the common case of accepting
         * a single integer (as parsed by simple_strtoull) from
         * userspace. Use in place of write(); return 0 or an error.
         */
        int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
        /*
         * write_s64() is a signed version of write_u64().
         */
        int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);

        /*
         * write_string() is passed a nul-terminated kernelspace
         * buffer of maximum length determined by max_write_len.
         * Returns 0 or a -ve error code.
         */
        int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
                            const char *buffer);
        /*
         * The trigger() callback can be used to get a kick from
         * userspace when the actual string written is not important
         * at all. The private field can be used to determine the
         * kick type for multiplexing.
         */
        int (*trigger)(struct cgroup *cgrp, unsigned int event);

        int (*release)(struct inode *inode, struct file *file);

        /*
         * The register_event() callback is used to add a new userspace
         * waiter for changes related to the cftype. Implement it if
         * you want to provide this functionality. Use eventfd_signal()
         * on the eventfd to send a notification to userspace.
         */
        int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
                              struct eventfd_ctx *eventfd, const char *args);
        /*
         * The unregister_event() callback is called when userspace
         * closes the eventfd or when the cgroup is removed. It must be
         * implemented if you want to provide notification functionality.
         */
        void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
                                 struct eventfd_ctx *eventfd);
};

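/*
 * Example (illustrative sketch, not part of the upstream header): a
 * minimal cftype array exposing a single u64 knob.  my_read(),
 * my_write() and my_state_of() are hypothetical controller helpers;
 * the zero length name in the last entry terminates the array.
 *
 *      static u64 my_read(struct cgroup *cgrp, struct cftype *cft)
 *      {
 *              return my_state_of(cgrp)->limit;
 *      }
 *
 *      static int my_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
 *      {
 *              my_state_of(cgrp)->limit = val;
 *              return 0;
 *      }
 *
 *      static struct cftype my_files[] = {
 *              {
 *                      .name = "my.limit",
 *                      .read_u64 = my_read,
 *                      .write_u64 = my_write,
 *              },
 *              { },
 *      };
 */
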
/*
 * cftype_sets describe cftypes belonging to a subsystem and are chained at
 * cgroup_subsys->cftsets. Each cftset points to an array of cftypes
 * terminated by zero length name.
 */
struct cftype_set {
        struct list_head node;          /* chained at subsys->cftsets */
        struct cftype *cfts;
};

struct cgroup_scanner {
        struct cgroup *cgrp;
        int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
        void (*process_task)(struct task_struct *p,
                             struct cgroup_scanner *scan);
        struct ptr_heap *heap;
        void *data;
};

/*
 * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
 * function can be called as long as @cgrp is accessible.
 */
static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
{
        return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
}

/* Caller should hold rcu_read_lock() */
static inline const char *cgroup_name(const struct cgroup *cgrp)
{
        return rcu_dereference(cgrp->name)->name;
}

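/*
 * Example (sketch, not part of the upstream header): the pointer
 * returned by cgroup_name() is only stable under rcu_read_lock(), so
 * copy the name out if it must outlive the critical section.  The
 * buffer size here is arbitrary.
 *
 *      char buf[64];
 *
 *      rcu_read_lock();
 *      strlcpy(buf, cgroup_name(cgrp), sizeof(buf));
 *      rcu_read_unlock();
 */
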
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);

bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);

int cgroup_task_count(const struct cgroup *cgrp);

/*
 * Control Group taskset, used to pass around a set of tasks to
 * cgroup_subsys methods.
 */
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
int cgroup_taskset_size(struct cgroup_taskset *tset);

/**
 * cgroup_taskset_for_each - iterate a cgroup_taskset
 * @task: the loop cursor
 * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
 * @tset: taskset to iterate
 */
#define cgroup_taskset_for_each(task, skip_cgrp, tset)                  \
        for ((task) = cgroup_taskset_first((tset)); (task);             \
             (task) = cgroup_taskset_next((tset)))                      \
                if (!(skip_cgrp) ||                                     \
                    cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))

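/*
 * Example (sketch, not part of the upstream header): a can_attach()
 * method walking every task being migrated and vetoing the attach on
 * a hypothetical per-task check, my_task_allowed().
 *
 *      static int my_can_attach(struct cgroup *cgrp,
 *                               struct cgroup_taskset *tset)
 *      {
 *              struct task_struct *task;
 *
 *              cgroup_taskset_for_each(task, NULL, tset)
 *                      if (!my_task_allowed(task))
 *                              return -EPERM;
 *              return 0;
 *      }
 */
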
/*
 * Control Group subsystem type.
 * See Documentation/cgroups/cgroups.txt for details.
 */
struct cgroup_subsys {
        struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
        int (*css_online)(struct cgroup *cgrp);
        void (*css_offline)(struct cgroup *cgrp);
        void (*css_free)(struct cgroup *cgrp);

        int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
        void (*fork)(struct task_struct *task);
        void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
                     struct task_struct *task);
        void (*bind)(struct cgroup *root);

        int subsys_id;
        int disabled;
        int early_init;
        /*
         * True if this subsys uses IDs. IDs are not available before
         * cgroup_init() (i.e. not available during early init).
         */
        bool use_id;

        /*
         * If %false, this subsystem is properly hierarchical -
         * configuration, resource accounting and restriction on a parent
         * cgroup cover those of its children. If %true, hierarchy support
         * is broken in some ways - some subsystems ignore hierarchy
         * completely while others are only implemented half-way.
         *
         * It's now disallowed to create nested cgroups if the subsystem is
         * broken, and cgroup core will emit a warning message in such
         * cases. Eventually, all subsystems will be made properly
         * hierarchical and this will go away.
         */
        bool broken_hierarchy;
        bool warned_broken_hierarchy;

#define MAX_CGROUP_TYPE_NAMELEN 32
        const char *name;

        /*
         * Link to parent, and list entry in parent's children.
         * Protected by cgroup_lock().
         */
        struct cgroupfs_root *root;
        struct list_head sibling;
        /* used when use_id == true */
        struct idr idr;
        spinlock_t id_lock;

        /* list of cftype_sets */
        struct list_head cftsets;

        /* base cftypes, automatically [de]registered with the subsys itself */
        struct cftype *base_cftypes;
        struct cftype_set base_cftset;

        /* should be defined only by modular subsystems */
        struct module *module;
};

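/*
 * Example (sketch, not part of the upstream header): the minimal shape
 * of a controller definition.  All "my_*" names are hypothetical; a
 * builtin controller also needs a SUBSYS(my_cgroup) entry in
 * linux/cgroup_subsys.h, while a modular one sets .module and is
 * registered with cgroup_load_subsys().
 *
 *      struct cgroup_subsys my_cgroup_subsys = {
 *              .name = "my_cgroup",
 *              .subsys_id = my_cgroup_subsys_id,
 *              .css_alloc = my_css_alloc,
 *              .css_free = my_css_free,
 *              .base_cftypes = my_files,
 *      };
 */
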
#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
#include <linux/cgroup_subsys.h>
#undef IS_SUBSYS_ENABLED
#undef SUBSYS

static inline struct cgroup_subsys_state *cgroup_subsys_state(
        struct cgroup *cgrp, int subsys_id)
{
        return cgrp->subsys[subsys_id];
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside the proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
#define task_css_set_check(task, __c)                                   \
        rcu_dereference_check((task)->cgroups,                          \
                lockdep_is_held(&(task)->alloc_lock) ||                 \
                lockdep_is_held(&cgroup_mutex) || (__c))
#else
#define task_css_set_check(task, __c)                                   \
        rcu_dereference((task)->cgroups)
#endif

/**
 * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_subsys_state_check(task, subsys_id, __c)                   \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_subsys_state - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_subsys_state_check().
 */
static inline struct cgroup_subsys_state *
task_subsys_state(struct task_struct *task, int subsys_id)
{
        return task_subsys_state_check(task, subsys_id, false);
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_subsys_state(task, subsys_id)->cgroup;
}

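/*
 * Example (sketch, not part of the upstream header): resolving the
 * cgroup a task belongs to for one subsystem.  The pointer is only
 * guaranteed stable inside the RCU section unless a reference is
 * taken; "my_subsys_id" is a stand-in for a real cgroup_subsys_id.
 *
 *      struct cgroup *cgrp;
 *
 *      rcu_read_lock();
 *      cgrp = task_cgroup(task, my_subsys_id);
 *      ... use @cgrp ...
 *      rcu_read_unlock();
 */
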
/**
 * cgroup_from_id - lookup cgroup by id
 * @ss: cgroup subsys to be looked into
 * @id: the cgroup id
 *
 * Returns the cgroup if there's a valid one with @id, otherwise returns
 * NULL. Should be called under rcu_read_lock().
 */
static inline struct cgroup *cgroup_from_id(struct cgroup_subsys *ss, int id)
{
#ifdef CONFIG_PROVE_RCU
        rcu_lockdep_assert(rcu_read_lock_held() ||
                           lockdep_is_held(&cgroup_mutex),
                           "cgroup_from_id() needs proper protection");
#endif
        return idr_find(&ss->root->cgroup_idr, id);
}

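/*
 * Example (sketch, not part of the upstream header): resolving an ID
 * and pinning the result through its css before leaving the RCU
 * section ("my_cgroup_subsys" and its subsys id are hypothetical).
 *
 *      rcu_read_lock();
 *      cgrp = cgroup_from_id(&my_cgroup_subsys, id);
 *      if (cgrp && css_tryget(cgroup_subsys_state(cgrp, my_cgroup_subsys_id)))
 *              ... @cgrp stays usable after rcu_read_unlock() ...
 *      rcu_read_unlock();
 */
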
struct cgroup *cgroup_next_sibling(struct cgroup *pos);

/**
 * cgroup_for_each_child - iterate through children of a cgroup
 * @pos: the cgroup * to use as the loop cursor
 * @cgrp: cgroup whose children to walk
 *
 * Walk @cgrp's children. Must be called under rcu_read_lock(). A child
 * cgroup which hasn't finished ->css_online() or already has finished
 * ->css_offline() may show up during traversal and it's each subsystem's
 * responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, a cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_child(pos, cgrp)                                \
        for ((pos) = list_first_or_null_rcu(&(cgrp)->children,          \
                                            struct cgroup, sibling);    \
             (pos); (pos) = cgroup_next_sibling((pos)))

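/*
 * Example (sketch, not part of the upstream header): walking children
 * while observing the liveness rules above - each child is pinned via
 * css_tryget() on its css for "my_subsys_id" (hypothetical) and
 * skipped if the tryget fails.
 *
 *      rcu_read_lock();
 *      cgroup_for_each_child(pos, cgrp) {
 *              struct cgroup_subsys_state *css =
 *                      cgroup_subsys_state(pos, my_subsys_id);
 *
 *              if (!css || !css_tryget(css))
 *                      continue;
 *              ... @pos is alive and pinned here ...
 *              css_put(css);
 *      }
 *      rcu_read_unlock();
 */
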
struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
                                          struct cgroup *cgroup);
struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);

/**
 * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Walk @cgroup's descendants. Must be called under rcu_read_lock(). A
 * descendant cgroup which hasn't finished ->css_online() or already has
 * finished ->css_offline() may show up during traversal and it's each
 * subsystem's responsibility to verify that each @pos is alive.
 *
 * If a subsystem synchronizes against the parent in its ->css_online() and
 * before starting iterating, and synchronizes against @pos on each
 * iteration, any descendant cgroup which finished ->css_online() is
 * guaranteed to be visible in the future iterations.
 *
 * In other words, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@cgrp)
 * {
 *      Lock @cgrp->parent and @cgrp;
 *      Inherit state from @cgrp->parent;
 *      Unlock both.
 * }
 *
 * my_update_state(@cgrp)
 * {
 *      Lock @cgrp;
 *      Update @cgrp's state;
 *      Unlock @cgrp;
 *
 *      cgroup_for_each_descendant_pre(@pos, @cgrp) {
 *              Lock @pos;
 *              Verify @pos is alive and inherit state from @pos->parent;
 *              Unlock @pos;
 *      }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any cgroup after the latest update to its
 * parent.
 *
 * If checking the parent's state requires locking the parent, each
 * inheriting iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define cgroup_for_each_descendant_pre(pos, cgroup)                     \
        for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos);   \
             pos = cgroup_next_descendant_pre((pos), (cgroup)))

struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
                                           struct cgroup *cgroup);

/**
 * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
 * @pos: the cgroup * to use as the loop cursor
 * @cgroup: cgroup whose descendants to walk
 *
 * Similar to cgroup_for_each_descendant_pre() but performs post-order
 * traversal instead. Note that the walk visibility guarantee described
 * for the pre-order walk doesn't apply to post-order walks.
 */
#define cgroup_for_each_descendant_post(pos, cgroup)                    \
        for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos);  \
             pos = cgroup_next_descendant_post((pos), (cgroup)))

/* A cgroup_iter should be treated as an opaque object */
struct cgroup_iter {
        struct list_head *cset_link;
        struct list_head *task;
};

/*
 * To iterate across the tasks in a cgroup:
 *
 * 1) call cgroup_iter_start to initialize an iterator
 *
 * 2) call cgroup_iter_next() to retrieve member tasks until it
 *    returns NULL or until you want to end the iteration
 *
 * 3) call cgroup_iter_end() to destroy the iterator.
 *
 * Or, call cgroup_scan_tasks() to iterate through every task in a
 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
 * the test_task() callback, but not while calling the process_task()
 * callback.
 */
void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
                                     struct cgroup_iter *it);
void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
int cgroup_scan_tasks(struct cgroup_scanner *scan);
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

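/*
 * Example (sketch, not part of the upstream header): counting the
 * tasks in @cgrp with the iterator protocol described above.
 *
 *      struct cgroup_iter it;
 *      struct task_struct *task;
 *      int n = 0;
 *
 *      cgroup_iter_start(cgrp, &it);
 *      while ((task = cgroup_iter_next(cgrp, &it)))
 *              n++;
 *      cgroup_iter_end(cgrp, &it);
 */
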
/*
 * CSS ID is an ID for cgroup_subsys_state structs under a subsys. It only
 * works if cgroup_subsys.use_id == true. It can be used for looking up and
 * scanning. A CSS ID is assigned automatically at cgroup allocation (create)
 * and removed when the subsys calls the free_css_id() function. This is
 * because the lifetime of cgroup_subsys_state is the subsys's matter.
 *
 * Lookup and scanning functions should be called under rcu_read_lock().
 * Taking cgroup_mutex is not necessary for the following calls.
 * But the css returned by this routine can be "not populated yet" or "being
 * destroyed". The caller should check the css and cgroup's status.
 */

/*
 * Typically called from ->destroy(), or wherever the subsys frees
 * cgroup_subsys_state.
 */
void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);

/* Find a cgroup_subsys_state which has the given ID */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);

/* Returns true if root is an ancestor of cg */
bool css_is_ancestor(struct cgroup_subsys_state *cg,
                     const struct cgroup_subsys_state *root);

/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);

#else /* !CONFIG_CGROUPS */

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p, int callbacks) {}

static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry)
{
        return -EINVAL;
}

/* No cgroups - nothing to do */
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t)
{
        return 0;
}

#endif /* !CONFIG_CGROUPS */

#endif /* _LINUX_CGROUP_H */