/*
 * cgroup_freezer: unnecessary test in cgroup_freezing_or_frozen()
 * kernel/cgroup_freezer.c
 */
1/*
2 * cgroup_freezer.c - control group freezer subsystem
3 *
4 * Copyright IBM Corporation, 2007
5 *
6 * Author : Cedric Le Goater <clg@fr.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2.1 of the GNU Lesser General Public License
10 * as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it would be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 */
16
17#include <linux/module.h>
5a0e3ad6 18#include <linux/slab.h>
dc52ddc0
MH
19#include <linux/cgroup.h>
20#include <linux/fs.h>
21#include <linux/uaccess.h>
22#include <linux/freezer.h>
23#include <linux/seq_file.h>
24
/*
 * Per-cgroup freezer state.  Userspace writes to freezer.state move a
 * cgroup THAWED -> FREEZING; FREEZING -> FROZEN happens (lazily) once
 * every task in the cgroup is seen frozen (see update_freezer_state()).
 */
enum freezer_state {
	CGROUP_THAWED = 0,
	CGROUP_FREEZING,
	CGROUP_FROZEN,
};
30
/* Freezer subsystem state attached to each cgroup. */
struct freezer {
	struct cgroup_subsys_state css;	/* embedded cgroup subsystem state */
	enum freezer_state state;	/* current state; reads are lockless */
	spinlock_t lock; /* protects _writes_ to state */
};
36
37static inline struct freezer *cgroup_freezer(
38 struct cgroup *cgroup)
39{
40 return container_of(
41 cgroup_subsys_state(cgroup, freezer_subsys_id),
42 struct freezer, css);
43}
44
45static inline struct freezer *task_freezer(struct task_struct *task)
46{
47 return container_of(task_subsys_state(task, freezer_subsys_id),
48 struct freezer, css);
49}
50
d5de4ddb 51static inline int __cgroup_freezing_or_frozen(struct task_struct *task)
dc52ddc0 52{
d5de4ddb
TB
53 enum freezer_state state = task_freezer(task)->state;
54 return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
55}
dc52ddc0 56
d5de4ddb
TB
/*
 * Locked wrapper around __cgroup_freezing_or_frozen(): task_lock()
 * pins the task's cgroup membership for the duration of the check.
 */
int cgroup_freezing_or_frozen(struct task_struct *task)
{
	int ret;

	task_lock(task);
	ret = __cgroup_freezing_or_frozen(task);
	task_unlock(task);

	return ret;
}
65
/*
 * cgroups_write_string() limits the size of freezer state strings to
 * CGROUP_LOCAL_BUFFER_SIZE
 */
/* Indexed by enum freezer_state (see freezer_read()); keep in sync. */
static const char *freezer_state_strs[] = {
	"THAWED",	/* CGROUP_THAWED */
	"FREEZING",	/* CGROUP_FREEZING */
	"FROZEN",	/* CGROUP_FROZEN */
};
75
/*
 * State diagram
 * Transitions are caused by userspace writes to the freezer.state file.
 * The values in parenthesis are state labels. The rest are edge labels.
 *
 * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
 *    ^ ^                    |                     |
 *    | \_______THAWED______/                      |
 *    \__________________________THAWED___________/
 */
86
87struct cgroup_subsys freezer_subsys;
88
/* Locks taken and their ordering
 * ------------------------------
 * cgroup_mutex (AKA cgroup_lock)
 * freezer->lock
 * css_set_lock
 * task->alloc_lock (AKA task_lock)
 * task->sighand->siglock
 *
 * cgroup code forces css_set_lock to be taken before task->alloc_lock
 *
 * freezer_create(), freezer_destroy():
 * cgroup_mutex [ by cgroup core ]
 *
 * freezer_can_attach():
 * cgroup_mutex (held by caller of can_attach)
 *
 * cgroup_freezing_or_frozen():
 * task->alloc_lock (to get task's cgroup)
 *
 * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
 * freezer->lock
 *  sighand->siglock (if the cgroup is freezing)
 *
 * freezer_read():
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *
 * freezer_write() (freeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    sighand->siglock (fake signal delivery inside freeze_task())
 *
 * freezer_write() (unfreeze):
 * cgroup_mutex
 *  freezer->lock
 *   write_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock
 *   read_lock css_set_lock (cgroup iterator start)
 *    task->alloc_lock (inside thaw_process(), prevents race with refrigerator())
 *     sighand->siglock
 */
136static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
137 struct cgroup *cgroup)
138{
139 struct freezer *freezer;
140
141 freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
142 if (!freezer)
143 return ERR_PTR(-ENOMEM);
144
145 spin_lock_init(&freezer->lock);
81dcf33c 146 freezer->state = CGROUP_THAWED;
dc52ddc0
MH
147 return &freezer->css;
148}
149
/* Release the freezer state allocated in freezer_create(). */
static void freezer_destroy(struct cgroup_subsys *ss,
			    struct cgroup *cgroup)
{
	struct freezer *freezer = cgroup_freezer(cgroup);

	kfree(freezer);
}
155
957a4eea
MH
156/* Task is frozen or will freeze immediately when next it gets woken */
157static bool is_task_frozen_enough(struct task_struct *task)
158{
159 return frozen(task) ||
160 (task_is_stopped_or_traced(task) && freezing(task));
161}
dc52ddc0 162
957a4eea
MH
/*
 * The call to cgroup_lock() in the freezer.state write method prevents
 * a write to that file racing against an attach, and hence the
 * can_attach() result will remain valid until the attach completes.
 */
static int freezer_can_attach(struct cgroup_subsys *ss,
			      struct cgroup *new_cgroup,
			      struct task_struct *task, bool threadgroup)
{
	struct freezer *freezer;

	/*
	 * Anything frozen can't move or be moved to/from.
	 *
	 * Since orig_freezer->state == FROZEN means that @task has been
	 * frozen, so it's sufficient to check the latter condition.
	 */

	if (is_task_frozen_enough(task))
		return -EBUSY;

	/* Destination must not be fully frozen either. */
	freezer = cgroup_freezer(new_cgroup);
	if (freezer->state == CGROUP_FROZEN)
		return -EBUSY;

	/*
	 * When moving a whole thread group, every member must be movable;
	 * the RCU read lock keeps the thread_group list stable while we walk.
	 */
	if (threadgroup) {
		struct task_struct *c;

		rcu_read_lock();
		list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
			if (is_task_frozen_enough(c)) {
				rcu_read_unlock();
				return -EBUSY;
			}
		}
		rcu_read_unlock();
	}

	return 0;
}
203
/*
 * Fork callback: make a child forked inside a FREEZING cgroup start
 * freezing immediately, so it cannot escape its cgroup's frozen state.
 */
static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
{
	struct freezer *freezer;

	/*
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
	 * function call. Nevertheless, apply RCU read-side critical
	 * section to suppress RCU lockdep false positives.
	 */
	rcu_read_lock();
	freezer = task_freezer(task);
	rcu_read_unlock();

	/*
	 * The root cgroup is non-freezable, so we can skip the
	 * following check.
	 */
	if (!freezer->css.cgroup->parent)
		return;

	spin_lock_irq(&freezer->lock);
	/*
	 * NOTE(review): a fully FROZEN cgroup presumably cannot contain a
	 * running task able to fork, so this state would indicate a
	 * state-machine violation — hence BUG_ON rather than handling it.
	 */
	BUG_ON(freezer->state == CGROUP_FROZEN);

	/* Locking avoids race with FREEZING -> THAWED transitions. */
	if (freezer->state == CGROUP_FREEZING)
		freeze_task(task, true);
	spin_unlock_irq(&freezer->lock);
}
234
/*
 * Recompute freezer->state from the actual state of the cgroup's tasks:
 * FROZEN if all are frozen, FREEZING if some are, THAWED if none are.
 *
 * caller must hold freezer->lock
 */
static void update_freezer_state(struct cgroup *cgroup,
				 struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int nfrozen = 0, ntotal = 0;

	/* Count frozen tasks among all tasks currently in the cgroup. */
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		ntotal++;
		if (is_task_frozen_enough(task))
			nfrozen++;
	}

	/*
	 * Transition to FROZEN when no new tasks can be added ensures
	 * that we never exist in the FROZEN state while there are unfrozen
	 * tasks.
	 */
	if (nfrozen == ntotal)
		freezer->state = CGROUP_FROZEN;
	else if (nfrozen > 0)
		freezer->state = CGROUP_FREEZING;
	else
		freezer->state = CGROUP_THAWED;
	cgroup_iter_end(cgroup, &it);
}
265
/*
 * Read handler for the freezer.state control file: prints the current
 * state name, opportunistically promoting FREEZING to FROZEN first.
 */
static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
			struct seq_file *m)
{
	struct freezer *freezer;
	enum freezer_state state;

	if (!cgroup_lock_live_group(cgroup))
		return -ENODEV;

	freezer = cgroup_freezer(cgroup);
	spin_lock_irq(&freezer->lock);
	state = freezer->state;
	if (state == CGROUP_FREEZING) {
		/* We change from FREEZING to FROZEN lazily if the cgroup was
		 * only partially frozen when we exitted write. */
		update_freezer_state(cgroup, freezer);
		state = freezer->state;
	}
	spin_unlock_irq(&freezer->lock);
	cgroup_unlock();

	seq_puts(m, freezer_state_strs[state]);
	seq_putc(m, '\n');
	return 0;
}
291
/*
 * Attempt to freeze every task in @cgroup.  Caller holds freezer->lock.
 * Returns 0 if all tasks were sent toward the frozen state, -EBUSY if
 * some task could not be frozen now (caller may retry; state is left
 * at CGROUP_FREEZING and freezer_read() finishes the job lazily).
 */
static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
{
	struct cgroup_iter it;
	struct task_struct *task;
	unsigned int num_cant_freeze_now = 0;

	freezer->state = CGROUP_FREEZING;
	cgroup_iter_start(cgroup, &it);
	while ((task = cgroup_iter_next(cgroup, &it))) {
		/* freeze_task() returning 0: nothing to do for this task */
		if (!freeze_task(task, true))
			continue;
		/* already frozen (or as good as): fine */
		if (is_task_frozen_enough(task))
			continue;
		/* neither freezing nor exempt: can't be frozen right now */
		if (!freezing(task) && !freezer_should_skip(task))
			num_cant_freeze_now++;
	}
	cgroup_iter_end(cgroup, &it);

	return num_cant_freeze_now ? -EBUSY : 0;
}
312
00c2e63c 313static void unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
dc52ddc0
MH
314{
315 struct cgroup_iter it;
316 struct task_struct *task;
317
318 cgroup_iter_start(cgroup, &it);
319 while ((task = cgroup_iter_next(cgroup, &it))) {
00c2e63c 320 thaw_process(task);
dc52ddc0
MH
321 }
322 cgroup_iter_end(cgroup, &it);
dc52ddc0 323
00c2e63c 324 freezer->state = CGROUP_THAWED;
dc52ddc0
MH
325}
326
/*
 * Drive the cgroup toward @goal_state (THAWED or FROZEN only; FREEZING
 * is an internal state and triggers BUG()).  Returns 0 on success or
 * -EBUSY from try_to_freeze_cgroup() if some tasks resist freezing.
 */
static int freezer_change_state(struct cgroup *cgroup,
				enum freezer_state goal_state)
{
	struct freezer *freezer;
	int retval = 0;

	freezer = cgroup_freezer(cgroup);

	spin_lock_irq(&freezer->lock);

	/* Resync state first: a lazy FREEZING->FROZEN may be pending. */
	update_freezer_state(cgroup, freezer);
	if (goal_state == freezer->state)
		goto out;

	switch (goal_state) {
	case CGROUP_THAWED:
		unfreeze_cgroup(cgroup, freezer);
		break;
	case CGROUP_FROZEN:
		retval = try_to_freeze_cgroup(cgroup, freezer);
		break;
	default:
		BUG();
	}
out:
	spin_unlock_irq(&freezer->lock);

	return retval;
}
356
357static int freezer_write(struct cgroup *cgroup,
358 struct cftype *cft,
359 const char *buffer)
360{
361 int retval;
362 enum freezer_state goal_state;
363
81dcf33c
MH
364 if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
365 goal_state = CGROUP_THAWED;
366 else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
367 goal_state = CGROUP_FROZEN;
dc52ddc0 368 else
3b1b3f6e 369 return -EINVAL;
dc52ddc0
MH
370
371 if (!cgroup_lock_live_group(cgroup))
372 return -ENODEV;
373 retval = freezer_change_state(cgroup, goal_state);
374 cgroup_unlock();
375 return retval;
376}
377
/* Control files exported per cgroup: just "freezer.state". */
static struct cftype files[] = {
	{
		.name = "state",
		.read_seq_string = freezer_read,
		.write_string = freezer_write,
	},
};
385
386static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
387{
3b1b3f6e
LZ
388 if (!cgroup->parent)
389 return 0;
dc52ddc0
MH
390 return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
391}
392
/* Subsystem registration; attach/exit hooks are intentionally unused. */
struct cgroup_subsys freezer_subsys = {
	.name		= "freezer",
	.create		= freezer_create,
	.destroy	= freezer_destroy,
	.populate	= freezer_populate,
	.subsys_id	= freezer_subsys_id,
	.can_attach	= freezer_can_attach,
	.attach		= NULL,
	.fork		= freezer_fork,
	.exit		= NULL,
};
/* end of kernel/cgroup_freezer.c */