freezer: convert freezable helpers to freezer_do_not_count()
include/linux/freezer.h
/* Freezer declarations */

#ifndef FREEZER_H_INCLUDED
#define FREEZER_H_INCLUDED

#include <linux/debug_locks.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_FREEZER
extern atomic_t system_freezing_cnt;	/* nr of freezing conds in effect */
extern bool pm_freezing;		/* PM freezing in effect */
extern bool pm_nosig_freezing;		/* PM nosig freezing in effect */

/*
 * Timeout for stopping processes
 */
extern unsigned int freeze_timeout_msecs;

/*
 * Check if a process has been frozen
 */
static inline bool frozen(struct task_struct *p)
{
	return p->flags & PF_FROZEN;
}

extern bool freezing_slow_path(struct task_struct *p);

/*
 * Check if there is a request to freeze a process
 */
static inline bool freezing(struct task_struct *p)
{
	if (likely(!atomic_read(&system_freezing_cnt)))
		return false;
	return freezing_slow_path(p);
}

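/*
 * Illustrative sketch, not part of this header: a long-running piece of
 * kernel code can poll freezing(current) to decide when to stop its work
 * and enter the refrigerator via try_to_freeze() (defined below).  The
 * helper names are hypothetical:
 *
 *	while (my_more_work_to_do()) {
 *		my_do_one_chunk_of_work();
 *		if (freezing(current))
 *			try_to_freeze();
 *	}
 */
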
/* Takes and releases task alloc lock using task_lock() */
extern void __thaw_task(struct task_struct *t);

extern bool __refrigerator(bool check_kthr_stop);
extern int freeze_processes(void);
extern int freeze_kernel_threads(void);
extern void thaw_processes(void);
extern void thaw_kernel_threads(void);

/*
 * DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION
 * If try_to_freeze causes a lockdep warning it means the caller may deadlock
 */
static inline bool try_to_freeze_unsafe(void)
{
	might_sleep();
	if (likely(!freezing(current)))
		return false;
	return __refrigerator(false);
}

static inline bool try_to_freeze(void)
{
	if (!(current->flags & PF_NOFREEZE))
		debug_check_no_locks_held();
	return try_to_freeze_unsafe();
}

extern bool freeze_task(struct task_struct *p);
extern bool set_freezable(void);

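/*
 * Illustrative sketch, not part of this header: kernel threads are not
 * freezable by default, so a kthread that wants to cooperate with the
 * freezer is expected to call set_freezable() once and then call
 * try_to_freeze() at points where being frozen is safe.  The thread
 * function and its helper below are hypothetical:
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_should_stop()) {
 *			try_to_freeze();
 *			my_do_background_work();
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */
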
#ifdef CONFIG_CGROUP_FREEZER
extern bool cgroup_freezing(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
static inline bool cgroup_freezing(struct task_struct *task)
{
	return false;
}
#endif /* !CONFIG_CGROUP_FREEZER */

/*
 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
 * calls wait_for_completion(&vfork) and reset right after it returns from this
 * function.  Next, the parent should call try_to_freeze() to freeze itself
 * appropriately in case the child has exited before the freezing of tasks is
 * complete.  However, we don't want kernel threads to be frozen in unexpected
 * places, so we allow them to block freeze_processes() instead or to set
 * PF_NOFREEZE if needed.  Fortunately, in the ____call_usermodehelper() case
 * the parent won't really block freeze_processes(), since
 * ____call_usermodehelper() (the child) does only a little work before
 * exec/exit and it can't be frozen before waking up the parent.
 */

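/*
 * Illustrative sketch of the pairing described above, not part of this
 * header: a vfork parent brackets its wait for the child with
 * freezer_do_not_count()/freezer_count(), in the spirit of
 * wait_for_vfork_done() in kernel/fork.c:
 *
 *	freezer_do_not_count();
 *	killed = wait_for_completion_killable(vfork);
 *	freezer_count();
 */
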
/**
 * freezer_do_not_count - tell freezer to ignore %current
 *
 * Tell freezers to ignore the current task when determining whether the
 * target frozen state is reached.  IOW, the current task will be
 * considered frozen enough by freezers.
 *
 * The caller shouldn't do anything which isn't allowed for a frozen task
 * until freezer_count() is called.  Usually, freezer[_do_not]_count() pair
 * wrap a scheduling operation and nothing much else.
 */
static inline void freezer_do_not_count(void)
{
	current->flags |= PF_FREEZER_SKIP;
}

/**
 * freezer_count - tell freezer to stop ignoring %current
 *
 * Undo freezer_do_not_count().  It tells freezers that %current should be
 * considered again and tries to freeze if a freezing condition is already
 * in effect.
 */
static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/*
	 * If freezing is in progress, the following paired with smp_mb()
	 * in freezer_should_skip() ensures that either we see %true
	 * freezing() or freezer_should_skip() sees !PF_FREEZER_SKIP.
	 */
	smp_mb();
	try_to_freeze();
}

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
static inline void freezer_count_unsafe(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	smp_mb();
	try_to_freeze_unsafe();
}

/**
 * freezer_should_skip - whether to skip a task when determining if the
 *			 frozen state has been reached
 * @p: task in question
 *
 * This function is used by freezers after establishing %true freezing() to
 * test whether a task should be skipped when determining the target frozen
 * state is reached.  IOW, if this function returns %true, @p is considered
 * frozen enough.
 */
static inline bool freezer_should_skip(struct task_struct *p)
{
	/*
	 * The following smp_mb() paired with the one in freezer_count()
	 * ensures that either freezer_count() sees %true freezing() or we
	 * see cleared %PF_FREEZER_SKIP and return %false.  This makes it
	 * impossible for a task to slip frozen state testing after
	 * clearing %PF_FREEZER_SKIP.
	 */
	smp_mb();
	return p->flags & PF_FREEZER_SKIP;
}

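/*
 * Illustrative sketch, not part of this header: on the freezer side, in
 * the spirit of try_to_freeze_tasks() in kernel/power/process.c, a task
 * still counts as "to do" only if it is neither frozen nor marked to be
 * skipped:
 *
 *	do_each_thread(g, p) {
 *		if (p == current || !freeze_task(p))
 *			continue;
 *		if (!freezer_should_skip(p))
 *			todo++;
 *	} while_each_thread(g, p);
 */
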
/*
 * These macros are intended to be used whenever you want to allow a sleeping
 * task to be frozen.  Note that none of them returns any clear indication of
 * whether a freeze event happened while inside them.
 */

/* Like schedule(), but should not block the freezer. */
#define freezable_schedule()						\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count();						\
})

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define freezable_schedule_unsafe()					\
({									\
	freezer_do_not_count();						\
	schedule();							\
	freezer_count_unsafe();						\
})

/* Like schedule_timeout_killable(), but should not block the freezer. */
#define freezable_schedule_timeout_killable(timeout)			\
({									\
	long __retval;							\
	freezer_do_not_count();						\
	__retval = schedule_timeout_killable(timeout);			\
	freezer_count();						\
	__retval;							\
})

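/*
 * Illustrative sketch, not part of this header: a polling kthread can
 * sleep between polls with freezable_schedule_timeout_killable() so that
 * the sleep itself does not hold up freeze_processes().  The helper and
 * the interval below are hypothetical:
 *
 *	while (!kthread_should_stop()) {
 *		my_poll_hardware();
 *		freezable_schedule_timeout_killable(my_poll_interval);
 *	}
 */
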
/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define freezable_schedule_timeout_killable_unsafe(timeout)		\
({									\
	long __retval;							\
	freezer_do_not_count();						\
	__retval = schedule_timeout_killable(timeout);			\
	freezer_count_unsafe();						\
	__retval;							\
})

/*
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count();						\
	__retval;							\
})

/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
#define wait_event_freezekillable_unsafe(wq, condition)			\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_killable(wq, (condition));		\
	freezer_count_unsafe();						\
	__retval;							\
})

#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible(wq, (condition));		\
	freezer_count();						\
	__retval;							\
})

#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __retval = timeout;					\
	freezer_do_not_count();						\
	__retval = wait_event_interruptible_timeout(wq, (condition),	\
				__retval);				\
	freezer_count();						\
	__retval;							\
})

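/*
 * Illustrative sketch, not part of this header: an event-driven kthread
 * can wait for work with wait_event_freezable(), so it is woken for new
 * work yet counted as frozen while it sleeps.  The wait queue and helpers
 * below are hypothetical:
 *
 *	while (!kthread_should_stop()) {
 *		wait_event_freezable(my_wq,
 *				     my_work_pending() || kthread_should_stop());
 *		my_process_pending_work();
 *	}
 */
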
#else /* !CONFIG_FREEZER */
static inline bool frozen(struct task_struct *p) { return false; }
static inline bool freezing(struct task_struct *p) { return false; }
static inline void __thaw_task(struct task_struct *t) {}

static inline bool __refrigerator(bool check_kthr_stop) { return false; }
static inline int freeze_processes(void) { return -ENOSYS; }
static inline int freeze_kernel_threads(void) { return -ENOSYS; }
static inline void thaw_processes(void) {}
static inline void thaw_kernel_threads(void) {}

static inline bool try_to_freeze_nowarn(void) { return false; }
static inline bool try_to_freeze(void) { return false; }

static inline void freezer_do_not_count(void) {}
static inline void freezer_count(void) {}
static inline int freezer_should_skip(struct task_struct *p) { return 0; }
static inline void set_freezable(void) {}

#define freezable_schedule()  schedule()

#define freezable_schedule_unsafe()  schedule()

#define freezable_schedule_timeout_killable(timeout)			\
	schedule_timeout_killable(timeout)

#define freezable_schedule_timeout_killable_unsafe(timeout)		\
	schedule_timeout_killable(timeout)

#define wait_event_freezable(wq, condition)				\
		wait_event_interruptible(wq, condition)

#define wait_event_freezable_timeout(wq, condition, timeout)		\
		wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition)			\
		wait_event_killable(wq, condition)

#define wait_event_freezekillable_unsafe(wq, condition)			\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */