sched/wait: Provide infrastructure to deal with nested blocking
[deliverable/linux.git] / include/linux/wait.h
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
        unsigned int flags;
        void *private;
        wait_queue_func_t func;
        struct list_head task_list;
};

struct wait_bit_key {
        void *flags;
        int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
        unsigned long timeout;
};

struct wait_bit_queue {
        struct wait_bit_key key;
        wait_queue_t wait;
};

struct __wait_queue_head {
        spinlock_t lock;
        struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private = tsk, \
        .func = default_wake_function, \
        .task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
        .task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
        do { \
                static struct lock_class_key __key; \
 \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags = 0;
        q->private = p;
        q->func = default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
        q->flags = 0;
        q->private = NULL;
        q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
        list_del(&old->task_list);
}

typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition) \
({ \
        bool __cond = (condition); \
        if (__cond && !__ret) \
                __ret = 1; \
        __cond || !__ret; \
})

#define ___wait_is_interruptible(state) \
        (!__builtin_constant_p(state) || \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
({ \
        __label__ __out; \
        wait_queue_t __wait; \
        long __ret = ret;	/* explicit shadow */ \
 \
        INIT_LIST_HEAD(&__wait.task_list); \
        if (exclusive) \
                __wait.flags = WQ_FLAG_EXCLUSIVE; \
        else \
                __wait.flags = 0; \
 \
        for (;;) { \
                long __int = prepare_to_wait_event(&wq, &__wait, state);\
 \
                if (condition) \
                        break; \
 \
                if (___wait_is_interruptible(state) && __int) { \
                        __ret = __int; \
                        if (exclusive) { \
                                abort_exclusive_wait(&wq, &__wait, \
                                                     state, NULL); \
                                goto __out; \
                        } \
                        break; \
                } \
 \
                cmd; \
        } \
        finish_wait(&wq, &__wait); \
__out:	__ret; \
})

#define __wait_event(wq, condition) \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
        if (condition) \
                break; \
        __wait_event(wq, condition); \
} while (0)
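
/*
 * Illustrative sketch (not part of the original header): a typical
 * producer/consumer pairing of wait_event() and wake_up(). The names
 * foo_wq and foo_ready are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *	static int foo_ready;
 *
 *	static void foo_consumer(void)
 *	{
 *		wait_event(foo_wq, foo_ready);	// sleeps TASK_UNINTERRUPTIBLE
 *	}
 *
 *	static void foo_producer(void)
 *	{
 *		foo_ready = 1;			// change the condition first
 *		wake_up(&foo_wq);		// then wake the waiters
 *	}
 */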

#define __wait_event_timeout(wq, condition, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_UNINTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_timeout(wq, condition, timeout); \
        __ret; \
})
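
/*
 * Illustrative sketch (not from the original header): interpreting the
 * wait_event_timeout() return value. foo_wq, foo_done and FOO_TIMEOUT_MS
 * are hypothetical names.
 *
 *	long remaining = wait_event_timeout(foo_wq, foo_done,
 *					    msecs_to_jiffies(FOO_TIMEOUT_MS));
 *	if (!remaining)
 *		pr_warn("foo: condition still false after timeout\n");
 *	else
 *		pr_debug("foo: done with %ld jiffies to spare\n", remaining);
 */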

#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command to be executed before sleep
 * @cmd2: the command to be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2) \
do { \
        if (condition) \
                break; \
        __wait_event_cmd(wq, condition, cmd1, cmd2); \
} while (0)

#define __wait_event_interruptible(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible(wq, condition); \
        __ret; \
})
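
/*
 * Illustrative sketch (not from the original header): the interruptible
 * variant must have its return value checked, since a signal ends the
 * wait early. foo_wq and foo_ready are hypothetical.
 *
 *	int err = wait_event_interruptible(foo_wq, foo_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: let the signal be handled
 *	// foo_ready was observed true here
 */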

#define __wait_event_interruptible_timeout(wq, condition, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_interruptible_timeout(wq, \
                                                condition, timeout); \
        __ret; \
})
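
/*
 * Illustrative sketch (not from the original header): distinguishing the
 * signal and timeout outcomes of wait_event_interruptible_timeout().
 * The names are hypothetical.
 *
 *	long ret = wait_event_interruptible_timeout(foo_wq, foo_ready, HZ);
 *	if (ret == -ERESTARTSYS)
 *		return ret;		// interrupted by a signal
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// condition still false after 1s
 *	// ret > 0: condition became true with ret jiffies left
 */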

#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
        int __ret = 0; \
        struct hrtimer_sleeper __t; \
 \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
                              HRTIMER_MODE_REL); \
        hrtimer_init_sleeper(&__t, current); \
        if ((timeout).tv64 != KTIME_MAX) \
                hrtimer_start_range_ns(&__t.timer, timeout, \
                                       current->timer_slack_ns, \
                                       HRTIMER_MODE_REL); \
 \
        __ret = ___wait_event(wq, condition, state, 0, 0, \
                if (!__t.task) { \
                        __ret = -ETIME; \
                        break; \
                } \
                schedule()); \
 \
        hrtimer_cancel(&__t.timer); \
        destroy_hrtimer_on_stack(&__t.timer); \
        __ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_UNINTERRUPTIBLE); \
        __ret; \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
        long __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_INTERRUPTIBLE); \
        __ret; \
})
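
/*
 * Illustrative sketch (not from the original header): the hrtimeout
 * variants take a ktime_t rather than jiffies, so sub-jiffy timeouts
 * are possible. The names foo_wq and foo_ready are hypothetical.
 *
 *	int err = wait_event_interruptible_hrtimeout(foo_wq, foo_ready,
 *						     ms_to_ktime(5));
 *	if (err == -ETIME)
 *		pr_debug("foo: no data within 5ms\n");
 *	else if (err == -ERESTARTSYS)
 *		return err;		// interrupted by a signal
 */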

#define __wait_event_interruptible_exclusive(wq, condition) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
                      schedule())

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_exclusive(wq, condition);\
        __ret; \
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
        int __ret = 0; \
        DEFINE_WAIT(__wait); \
        if (exclusive) \
                __wait.flags |= WQ_FLAG_EXCLUSIVE; \
        do { \
                if (likely(list_empty(&__wait.task_list))) \
                        __add_wait_queue_tail(&(wq), &__wait); \
                set_current_state(TASK_INTERRUPTIBLE); \
                if (signal_pending(current)) { \
                        __ret = -ERESTARTSYS; \
                        break; \
                } \
                if (irq) \
                        spin_unlock_irq(&(wq).lock); \
                else \
                        spin_unlock(&(wq).lock); \
                schedule(); \
                if (irq) \
                        spin_lock_irq(&(wq).lock); \
                else \
                        spin_lock(&(wq).lock); \
        } while (!(condition)); \
        __remove_wait_queue(&(wq), &__wait); \
        __set_current_state(TASK_RUNNING); \
        __ret; \
})


/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, further waiting processes on
 * the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, further waiting processes on
 * the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
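
/*
 * Illustrative sketch (not from the original header): the *_locked
 * variants are called with wq.lock already held, so the condition can
 * be protected by the waitqueue's own spinlock. Names are hypothetical.
 *
 *	spin_lock(&foo_wq.lock);
 *	err = wait_event_interruptible_locked(foo_wq, foo_count > 0);
 *	if (!err)
 *		foo_count--;		// condition checked under wq.lock
 *	spin_unlock(&foo_wq.lock);
 *
 *	// the waker side uses the same lock and the _locked wakeup:
 *	spin_lock(&foo_wq.lock);
 *	foo_count++;
 *	wake_up_locked(&foo_wq);
 *	spin_unlock(&foo_wq.lock);
 */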


#define __wait_event_killable(wq, condition) \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_killable(wq, condition); \
        __ret; \
})
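
/*
 * Illustrative sketch (not from the original header): TASK_KILLABLE
 * waits ignore ordinary signals but still return -ERESTARTSYS when a
 * fatal signal is pending. foo_wq and foo_ready are hypothetical.
 *
 *	int err = wait_event_killable(foo_wq, foo_ready);
 *	if (err)
 *		return err;	// the task is being killed, give up the wait
 */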


#define __wait_event_lock_irq(wq, condition, lock, cmd) \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
                            spin_unlock_irq(&lock); \
                            cmd; \
                            schedule(); \
                            spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, ); \
} while (0)
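
/*
 * Illustrative sketch (not from the original header): wait_event_lock_irq()
 * keeps the condition under an external spinlock; the lock is dropped only
 * around schedule(). foo_lock, foo_wq, foo_list and struct foo_item are
 * hypothetical.
 *
 *	spin_lock_irq(&foo_lock);
 *	wait_event_lock_irq(foo_wq, !list_empty(&foo_list), foo_lock);
 *	item = list_first_entry(&foo_list, struct foo_item, node);
 *	list_del(&item->node);
 *	spin_unlock_irq(&foo_lock);
 */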


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
                      spin_unlock_irq(&lock); \
                      cmd; \
                      schedule(); \
                      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq, \
                                                condition, lock, cmd); \
        __ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_interruptible_lock_irq(wq, \
                                                condition, lock,); \
        __ret; \
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
                                                    lock, timeout) \
        ___wait_event(wq, ___wait_cond_timeout(condition), \
                      TASK_INTERRUPTIBLE, 0, timeout, \
                      spin_unlock_irq(&lock); \
                      __ret = schedule_timeout(__ret); \
                      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
                                                  timeout) \
({ \
        long __ret = timeout; \
        if (!___wait_cond_timeout(condition)) \
                __ret = __wait_event_interruptible_lock_irq_timeout( \
                                        wq, condition, lock, timeout); \
        __ret; \
})
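
/*
 * Illustrative sketch (not from the original header): the lock_irq
 * timeout variant mixes all three outcomes. foo_wq, foo_lock, foo_state
 * and FOO_IDLE are hypothetical.
 *
 *	spin_lock_irq(&foo_lock);
 *	ret = wait_event_interruptible_lock_irq_timeout(foo_wq,
 *							foo_state != FOO_IDLE,
 *							foo_lock, 2 * HZ);
 *	spin_unlock_irq(&foo_lock);
 *	if (ret == 0)
 *		return -ETIMEDOUT;	// still idle after two seconds
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS on signal
 */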

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
        wait_queue_t name = { \
                .private = current, \
                .func = function, \
                .task_list = LIST_HEAD_INIT((name).task_list), \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
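
/*
 * Illustrative sketch (not from the original header): the wait_woken()/
 * woken_wake_function() pair added for nested blocking lets the condition
 * check itself block (e.g. take mutexes) without losing wakeups, because
 * the wakeup is recorded in WQ_FLAG_WOKEN rather than only in the task
 * state. The names foo_wq and foo_ready are hypothetical.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&foo_wq, &wait);
 *	for (;;) {
 *		if (foo_ready)			// this check may block itself
 *			break;
 *		wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 *		if (signal_pending(current))
 *			break;
 *	}
 *	remove_wait_queue(&foo_wq, &wait);
 */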

#define DEFINE_WAIT_BIT(name, word, bit) \
        struct wait_bit_queue name = { \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
                .wait = { \
                        .private = current, \
                        .func = wake_bit_function, \
                        .task_list = \
                                LIST_HEAD_INIT((name).wait.task_list), \
                }, \
        }

#define init_wait(wait) \
        do { \
                (wait)->private = current; \
                (wait)->func = autoremove_wake_function; \
                INIT_LIST_HEAD(&(wait)->task_list); \
                (wait)->flags = 0; \
        } while (0)


extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(void *word, int bit, unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
                                       mode);
}
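
/*
 * Illustrative sketch (not from the original header): waiting for and
 * waking a bit in a flags word. FOO_BUSY and foo->flags are hypothetical.
 *
 *	// waiter: sleep until FOO_BUSY is cleared (interruptible)
 *	if (wait_on_bit(&foo->flags, FOO_BUSY, TASK_INTERRUPTIBLE))
 *		return -EINTR;	// a signal arrived first
 *
 *	// owner: clear the bit, then wake anyone in the hashed waitqueue
 *	clear_bit(FOO_BUSY, &foo->flags);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&foo->flags, FOO_BUSY);
 */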

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(void *word, int bit, unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
                                       mode);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(void *word, int bit, unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(void *word, int bit, unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
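
/*
 * Illustrative sketch (not from the original header): waiting for a
 * refcount-style atomic_t to drain to zero. foo->users and
 * foo_wait_action are hypothetical; the action callback typically just
 * schedules and returns 0.
 *
 *	static int foo_wait_action(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_atomic_t(&foo->users, foo_wait_action, TASK_UNINTERRUPTIBLE);
 *
 *	// last user: drop the count and wake the waiter
 *	if (atomic_dec_and_test(&foo->users))
 *		wake_up_atomic_t(&foo->users);
 */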

#endif /* _LINUX_WAIT_H */