cgroup_freezer: prepare update_if_frozen() for locking change
[deliverable/linux.git] / kernel / cgroup_freezer.c
1 /*
2 * cgroup_freezer.c - control group freezer subsystem
3 *
4 * Copyright IBM Corporation, 2007
5 *
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17 #include <linux/export.h>
18 #include <linux/slab.h>
19 #include <linux/cgroup.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/freezer.h>
23 #include <linux/seq_file.h>
24
/* Per-cgroup freezer states, reported through the freezer.state file. */
enum freezer_state {
	CGROUP_THAWED = 0,	/* tasks run normally */
	CGROUP_FREEZING,	/* freeze requested; not all tasks frozen yet */
	CGROUP_FROZEN,		/* all tasks in the cgroup are frozen */
};
30
/* Freezer subsystem state attached to each cgroup. */
struct freezer {
	struct cgroup_subsys_state css;	/* embedded cgroup subsys state */
	enum freezer_state state;	/* current state; see enum above */
	spinlock_t lock; /* protects _writes_ to state */
};
36
37 static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39 {
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43 }
44
45 static inline struct freezer *task_freezer(struct task_struct *task)
46 {
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49 }
50
51 bool cgroup_freezing(struct task_struct *task)
52 {
53 enum freezer_state state;
54 bool ret;
55
56 rcu_read_lock();
57 state = task_freezer(task)->state;
58 ret = state == CGROUP_FREEZING || state == CGROUP_FROZEN;
59 rcu_read_unlock();
60
61 return ret;
62 }
63
/*
 * String names of the freezer states, indexed by enum freezer_state.
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE, so these always fit the write buffer.
 */
static const char *freezer_state_strs[] = {
	"THAWED",
	"FREEZING",
	"FROZEN",
};
73
74 /*
75 * State diagram
76 * Transitions are caused by userspace writes to the freezer.state file.
77 * The values in parenthesis are state labels. The rest are edge labels.
78 *
79 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
80 * ^ ^ | |
81 * | \_______THAWED_______/ |
82 * \__________________________THAWED____________/
83 */
84
85 struct cgroup_subsys freezer_subsys;
86
87 /* Locks taken and their ordering
88 * ------------------------------
89 * cgroup_mutex (AKA cgroup_lock)
90 * freezer->lock
91 * css_set_lock
92 * task->alloc_lock (AKA task_lock)
93 * task->sighand->siglock
94 *
95 * cgroup code forces css_set_lock to be taken before task->alloc_lock
96 *
97 * freezer_create(), freezer_destroy():
98 * cgroup_mutex [ by cgroup core ]
99 *
100 * freezer_can_attach():
101 * cgroup_mutex (held by caller of can_attach)
102 *
103 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
104 * freezer->lock
105 * sighand->siglock (if the cgroup is freezing)
106 *
107 * freezer_read():
108 * cgroup_mutex
109 * freezer->lock
110 * write_lock css_set_lock (cgroup iterator start)
111 * task->alloc_lock
112 * read_lock css_set_lock (cgroup iterator start)
113 *
114 * freezer_write() (freeze):
115 * cgroup_mutex
116 * freezer->lock
117 * write_lock css_set_lock (cgroup iterator start)
118 * task->alloc_lock
119 * read_lock css_set_lock (cgroup iterator start)
120 * sighand->siglock (fake signal delivery inside freeze_task())
121 *
122 * freezer_write() (unfreeze):
123 * cgroup_mutex
124 * freezer->lock
125 * write_lock css_set_lock (cgroup iterator start)
126 * task->alloc_lock
127 * read_lock css_set_lock (cgroup iterator start)
128 * task->alloc_lock (inside __thaw_task(), prevents race with refrigerator())
129 * sighand->siglock
130 */
131 static struct cgroup_subsys_state *freezer_create(struct cgroup *cgroup)
132 {
133 struct freezer *freezer;
134
135 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
136 if (!freezer)
137 return ERR_PTR(-ENOMEM);
138
139 spin_lock_init(&freezer->lock);
140 freezer->state = CGROUP_THAWED;
141 return &freezer->css;
142 }
143
144 static void freezer_destroy(struct cgroup *cgroup)
145 {
146 struct freezer *freezer = cgroup_freezer(cgroup);
147
148 if (freezer->state != CGROUP_THAWED)
149 atomic_dec(&system_freezing_cnt);
150 kfree(freezer);
151 }
152
153 /*
154 * The call to cgroup_lock() in the freezer.state write method prevents
155 * a write to that file racing against an attach, and hence we don't need
156 * to worry about racing against migration.
157 */
158 static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset)
159 {
160 struct freezer *freezer = cgroup_freezer(new_cgrp);
161 struct task_struct *task;
162
163 spin_lock_irq(&freezer->lock);
164
165 /*
166 * Make the new tasks conform to the current state of @new_cgrp.
167 * For simplicity, when migrating any task to a FROZEN cgroup, we
168 * revert it to FREEZING and let update_if_frozen() determine the
169 * correct state later.
170 *
171 * Tasks in @tset are on @new_cgrp but may not conform to its
172 * current state before executing the following - !frozen tasks may
173 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
174 * This means that, to determine whether to freeze, one should test
175 * whether the state equals THAWED.
176 */
177 cgroup_taskset_for_each(task, new_cgrp, tset) {
178 if (freezer->state == CGROUP_THAWED) {
179 __thaw_task(task);
180 } else {
181 freeze_task(task);
182 freezer->state = CGROUP_FREEZING;
183 }
184 }
185
186 spin_unlock_irq(&freezer->lock);
187 }
188
189 static void freezer_fork(struct task_struct *task)
190 {
191 struct freezer *freezer;
192
193 rcu_read_lock();
194 freezer = task_freezer(task);
195
196 /*
197 * The root cgroup is non-freezable, so we can skip the
198 * following check.
199 */
200 if (!freezer->css.cgroup->parent)
201 goto out;
202
203 spin_lock_irq(&freezer->lock);
204 /*
205 * @task might have been just migrated into a FROZEN cgroup. Test
206 * equality with THAWED. Read the comment in freezer_attach().
207 */
208 if (freezer->state != CGROUP_THAWED)
209 freeze_task(task);
210 spin_unlock_irq(&freezer->lock);
211 out:
212 rcu_read_unlock();
213 }
214
215 /*
216 * We change from FREEZING to FROZEN lazily if the cgroup was only
217 * partially frozen when we exitted write. Caller must hold freezer->lock.
218 *
219 * Task states and freezer state might disagree while tasks are being
220 * migrated into @cgroup, so we can't verify task states against @freezer
221 * state here. See freezer_attach() for details.
222 */
223 static void update_if_frozen(struct cgroup *cgroup, struct freezer *freezer)
224 {
225 struct cgroup_iter it;
226 struct task_struct *task;
227
228 if (freezer->state != CGROUP_FREEZING)
229 return;
230
231 cgroup_iter_start(cgroup, &it);
232
233 while ((task = cgroup_iter_next(cgroup, &it))) {
234 if (freezing(task)) {
235 /*
236 * freezer_should_skip() indicates that the task
237 * should be skipped when determining freezing
238 * completion. Consider it frozen in addition to
239 * the usual frozen condition.
240 */
241 if (!frozen(task) && !task_is_stopped_or_traced(task) &&
242 !freezer_should_skip(task))
243 goto notyet;
244 }
245 }
246
247 freezer->state = CGROUP_FROZEN;
248 notyet:
249 cgroup_iter_end(cgroup, &it);
250 }
251
252 static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
253 struct seq_file *m)
254 {
255 struct freezer *freezer;
256 enum freezer_state state;
257
258 if (!cgroup_lock_live_group(cgroup))
259 return -ENODEV;
260
261 freezer = cgroup_freezer(cgroup);
262 spin_lock_irq(&freezer->lock);
263 update_if_frozen(cgroup, freezer);
264 state = freezer->state;
265 spin_unlock_irq(&freezer->lock);
266 cgroup_unlock();
267
268 seq_puts(m, freezer_state_strs[state]);
269 seq_putc(m, '\n');
270 return 0;
271 }
272
273 static void freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
274 {
275 struct cgroup_iter it;
276 struct task_struct *task;
277
278 cgroup_iter_start(cgroup, &it);
279 while ((task = cgroup_iter_next(cgroup, &it)))
280 freeze_task(task);
281 cgroup_iter_end(cgroup, &it);
282 }
283
284 static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
285 {
286 struct cgroup_iter it;
287 struct task_struct *task;
288
289 cgroup_iter_start(cgroup, &it);
290 while ((task = cgroup_iter_next(cgroup, &it)))
291 __thaw_task(task);
292 cgroup_iter_end(cgroup, &it);
293 }
294
295 static void freezer_change_state(struct cgroup *cgroup,
296 enum freezer_state goal_state)
297 {
298 struct freezer *freezer = cgroup_freezer(cgroup);
299
300 spin_lock_irq(&freezer->lock);
301
302 switch (goal_state) {
303 case CGROUP_THAWED:
304 if (freezer->state != CGROUP_THAWED)
305 atomic_dec(&system_freezing_cnt);
306 freezer->state = CGROUP_THAWED;
307 unfreeze_cgroup(cgroup, freezer);
308 break;
309 case CGROUP_FROZEN:
310 if (freezer->state == CGROUP_THAWED)
311 atomic_inc(&system_freezing_cnt);
312 freezer->state = CGROUP_FREEZING;
313 freeze_cgroup(cgroup, freezer);
314 break;
315 default:
316 BUG();
317 }
318
319 spin_unlock_irq(&freezer->lock);
320 }
321
322 static int freezer_write(struct cgroup *cgroup,
323 struct cftype *cft,
324 const char *buffer)
325 {
326 enum freezer_state goal_state;
327
328 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
329 goal_state = CGROUP_THAWED;
330 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
331 goal_state = CGROUP_FROZEN;
332 else
333 return -EINVAL;
334
335 if (!cgroup_lock_live_group(cgroup))
336 return -ENODEV;
337 freezer_change_state(cgroup, goal_state);
338 cgroup_unlock();
339 return 0;
340 }
341
/* Control files exported by the freezer subsystem. */
static struct cftype files[] = {
	{
		.name = "state",
		/* the root cgroup is non-freezable; don't show the file there */
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
	{ }	/* terminate */
};
351
/* Registration of the freezer subsystem with the cgroup core. */
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.subsys_id	= freezer_subsys_id,
	.attach		= freezer_attach,
	.fork		= freezer_fork,
	.base_cftypes	= files,

	/*
	 * freezer subsys doesn't handle hierarchy at all. Frozen state
	 * should be inherited through the hierarchy - if a parent is
	 * frozen, all its children should be frozen. Fix it and remove
	 * the following.
	 */
	.broken_hierarchy = true,
};
This page took 0.038795 seconds and 6 git commands to generate.