#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int		flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {	\
	.private	= tsk,	\
	.func		= default_wake_function,	\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)	\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {	\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),	\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)	\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)	\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)	\
	do {	\
		static struct lock_class_key __key;	\
		\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)
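
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * wait queue heads in static storage can use DECLARE_WAIT_QUEUE_HEAD();
 * heads embedded in dynamically allocated structures must be set up
 * with init_waitqueue_head() before first use.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_global_wq);
 *
 *	struct foo_device {
 *		wait_queue_head_t	wq;
 *		// ...
 *	};
 *
 *	static int foo_setup(struct foo_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);
 *		return 0;
 *	}
 */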

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)	\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)	\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)	\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)	\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)	\
({	\
	bool __cond = (condition);	\
	if (__cond && !__ret)	\
		__ret = 1;	\
	__cond || !__ret;	\
})

#define ___wait_is_interruptible(state)	\
	(!__builtin_constant_p(state) ||	\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({	\
	__label__ __out;	\
	wait_queue_t __wait;	\
	long __ret = ret;	\
	\
	INIT_LIST_HEAD(&__wait.task_list);	\
	if (exclusive)	\
		__wait.flags = WQ_FLAG_EXCLUSIVE;	\
	else	\
		__wait.flags = 0;	\
	\
	for (;;) {	\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
		\
		if (condition)	\
			break;	\
		\
		if (___wait_is_interruptible(state) && __int) {	\
			__ret = __int;	\
			if (exclusive) {	\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;	\
			}	\
			break;	\
		}	\
		\
		cmd;	\
	}	\
	finish_wait(&wq, &__wait);	\
__out:	__ret;	\
})

#define __wait_event(wq, condition)	\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)	\
do {	\
	if (condition)	\
		break;	\
	__wait_event(wq, condition);	\
} while (0)
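
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * a consumer sleeps in wait_event() until a producer sets a flag and
 * then calls wake_up() on the same wait queue head.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(foo_wq);
 *	static int foo_ready;
 *
 *	// consumer
 *	wait_event(foo_wq, foo_ready);
 *
 *	// producer
 *	foo_ready = 1;
 *	wake_up(&foo_wq);
 */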

#define __wait_event_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),	\
		      TASK_UNINTERRUPTIBLE, 0, timeout,	\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)	\
({	\
	long __ret = timeout;	\
	if (!___wait_cond_timeout(condition))	\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;	\
})
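
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * wait up to one second for a flag; the return value tells whether the
 * condition or the timeout fired first.
 *
 *	long remaining;
 *
 *	remaining = wait_event_timeout(foo_wq, foo_ready, HZ);
 *	if (!remaining)
 *		return -ETIMEDOUT;	// timed out, foo_ready still false
 *	// foo_ready became true with 'remaining' jiffies to spare
 */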

#define __wait_event_interruptible(wq, condition)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)	\
({	\
	int __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;	\
})
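
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * a syscall-level wait that lets signals abort the sleep.
 *
 *	int err;
 *
 *	err = wait_event_interruptible(foo_wq, foo_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a signal arrived first
 *	// foo_ready is true here
 */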

#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),	\
		      TASK_INTERRUPTIBLE, 0, timeout,	\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({	\
	long __ret = timeout;	\
	if (!___wait_cond_timeout(condition))	\
		__ret = __wait_event_interruptible_timeout(wq,	\
						condition, timeout);	\
	__ret;	\
})

#define __wait_event_hrtimeout(wq, condition, timeout, state)	\
({	\
	int __ret = 0;	\
	struct hrtimer_sleeper __t;	\
	\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,	\
			      HRTIMER_MODE_REL);	\
	hrtimer_init_sleeper(&__t, current);	\
	if ((timeout).tv64 != KTIME_MAX)	\
		hrtimer_start_range_ns(&__t.timer, timeout,	\
				       current->timer_slack_ns,	\
				       HRTIMER_MODE_REL);	\
	\
	__ret = ___wait_event(wq, condition, state, 0, 0,	\
		if (!__t.task) {	\
			__ret = -ETIME;	\
			break;	\
		}	\
		schedule());	\
	\
	hrtimer_cancel(&__t.timer);	\
	destroy_hrtimer_on_stack(&__t.timer);	\
	__ret;	\
})

/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)	\
({	\
	int __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;	\
})
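
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * wait for up to 500us with high-resolution timer granularity instead
 * of a jiffies-based timeout.
 *
 *	int err;
 *
 *	err = wait_event_hrtimeout(foo_wq, foo_ready,
 *				   ktime_set(0, 500 * NSEC_PER_USEC));
 *	if (err == -ETIME)
 *		foo_handle_timeout();	// budget ran out before foo_ready
 */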

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({	\
	long __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;	\
})

#define __wait_event_interruptible_exclusive(wq, condition)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,	\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)	\
({	\
	int __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;	\
})
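
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * exclusive waiters are queued with WQ_FLAG_EXCLUSIVE set, so a single
 * wake_up() wakes at most one of them, which avoids a thundering herd
 * when many threads wait for the same resource.
 *
 *	// each worker thread
 *	if (wait_event_interruptible_exclusive(foo_wq, foo_slot_free))
 *		return -ERESTARTSYS;
 *	// at most one waiter proceeds per wake_up(&foo_wq)
 */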


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({	\
	int __ret = 0;	\
	DEFINE_WAIT(__wait);	\
	if (exclusive)	\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;	\
	do {	\
		if (likely(list_empty(&__wait.task_list)))	\
			__add_wait_queue_tail(&(wq), &__wait);	\
		set_current_state(TASK_INTERRUPTIBLE);	\
		if (signal_pending(current)) {	\
			__ret = -ERESTARTSYS;	\
			break;	\
		}	\
		if (irq)	\
			spin_unlock_irq(&(wq).lock);	\
		else	\
			spin_unlock(&(wq).lock);	\
		schedule();	\
		if (irq)	\
			spin_lock_irq(&(wq).lock);	\
		else	\
			spin_lock(&(wq).lock);	\
	} while (!(condition));	\
	__remove_wait_queue(&(wq), &__wait);	\
	__set_current_state(TASK_RUNNING);	\
	__ret;	\
})


/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro returns.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)	\
	((condition)	\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
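
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * here the waitqueue head's own lock protects the condition, so the
 * sleeper holds wq.lock around the wait and the waker uses
 * wake_up_locked() under the same lock.
 *
 *	// sleeper
 *	spin_lock(&foo_wq.lock);
 *	err = wait_event_interruptible_locked(foo_wq, foo_ready);
 *	spin_unlock(&foo_wq.lock);
 *
 *	// waker
 *	spin_lock(&foo_wq.lock);
 *	foo_ready = 1;
 *	wake_up_locked(&foo_wq);
 *	spin_unlock(&foo_wq.lock);
 */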

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro returns.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)	\
	((condition)	\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro returns.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)	\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro returns.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the list and this process
 * is woken up, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)	\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)	\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)	\
({	\
	int __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_killable(wq, condition);	\
	__ret;	\
})
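
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * like wait_event_interruptible(), but only a fatal signal (e.g. SIGKILL)
 * interrupts the sleep, which suits filesystem and storage paths.
 *
 *	if (wait_event_killable(foo_wq, foo_io_done))
 *		return -ERESTARTSYS;	// killed while waiting
 *	// foo_io_done is true here
 */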


#define __wait_event_lock_irq(wq, condition, lock, cmd)	\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);	\
			    cmd;	\
			    schedule();	\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)	\
do {	\
	if (condition)	\
		break;	\
	__wait_event_lock_irq(wq, condition, lock, cmd);	\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition becomes true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)	\
do {	\
	if (condition)	\
		break;	\
	__wait_event_lock_irq(wq, condition, lock, );	\
} while (0)
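
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * the condition is protected by a driver spinlock taken with
 * spin_lock_irq(); the macro drops and retakes that lock around
 * schedule().  Note that the lock is passed by name, not by address.
 *
 *	// sleeper
 *	spin_lock_irq(&foo_lock);
 *	wait_event_lock_irq(foo_wq, foo_count > 0, foo_lock);
 *	foo_count--;
 *	spin_unlock_irq(&foo_lock);
 *
 *	// waker
 *	spin_lock_irq(&foo_lock);
 *	foo_count++;
 *	wake_up(&foo_wq);
 *	spin_unlock_irq(&foo_lock);
 */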


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);	\
		      cmd;	\
		      schedule();	\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({	\
	int __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_interruptible_lock_irq(wq,	\
						condition, lock, cmd);	\
	__ret;	\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)	\
({	\
	int __ret = 0;	\
	if (!(condition))	\
		__ret = __wait_event_interruptible_lock_irq(wq,	\
						condition, lock,);	\
	__ret;	\
})
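
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * as wait_event_lock_irq(), but signals can abort the wait, so the
 * return value is checked while the lock is still held.
 *
 *	spin_lock_irq(&foo_lock);
 *	err = wait_event_interruptible_lock_irq(foo_wq, foo_count > 0,
 *						foo_lock);
 *	if (!err)
 *		foo_count--;
 *	spin_unlock_irq(&foo_lock);
 *	if (err)
 *		return err;	// -ERESTARTSYS
 */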

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),	\
		      TASK_INTERRUPTIBLE, 0, timeout,	\
		      spin_unlock_irq(&lock);	\
		      __ret = schedule_timeout(__ret);	\
		      spin_lock_irq(&lock));

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)	\
({	\
	long __ret = timeout;	\
	if (!___wait_cond_timeout(condition))	\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;	\
})


/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q, signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q, signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)	\
	wait_queue_t name = {	\
		.private	= current,	\
		.func		= function,	\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
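
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * the open-coded equivalent of wait_event_interruptible(), useful when
 * extra steps are needed between prepare_to_wait() and schedule().
 *
 *	DEFINE_WAIT(wait);
 *	int err = 0;
 *
 *	for (;;) {
 *		prepare_to_wait(&foo_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (foo_ready)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&foo_wq, &wait);
 */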

#define DEFINE_WAIT_BIT(name, word, bit)	\
	struct wait_bit_queue name = {	\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),	\
		.wait	= {	\
			.private	= current,	\
			.func		= wake_bit_function,	\
			.task_list	=	\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},	\
	}

#define init_wait(wait)	\
	do {	\
		(wait)->private = current;	\
		(wait)->func = autoremove_wake_function;	\
		INIT_LIST_HEAD(&(wait)->task_list);	\
		(wait)->flags = 0;	\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int
wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
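
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * sleep until another context clears FOO_BUSY in foo->flags.  The
 * @action callback decides how to sleep; a minimal one just calls
 * schedule() and returns 0.  The clearing side must use wake_up_bit()
 * (preceded by a barrier such as smp_mb__after_clear_bit()).
 *
 *	static int foo_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_bit(&foo->flags, FOO_BUSY, foo_bit_wait, TASK_UNINTERRUPTIBLE);
 *
 *	// clearing side
 *	clear_bit(FOO_BUSY, &foo->flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&foo->flags, FOO_BUSY);
 */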

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int
wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}
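
/*
 * Illustrative usage sketch (hypothetical names, not part of this API):
 * wait for an object's user count to drop to zero; whoever drops the
 * last reference calls wake_up_atomic_t() on the same atomic_t.
 *
 *	static int foo_atomic_wait(atomic_t *val)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter
 *	wait_on_atomic_t(&foo->users, foo_atomic_wait, TASK_UNINTERRUPTIBLE);
 *
 *	// releasing side
 *	if (atomic_dec_and_test(&foo->users))
 *		wake_up_atomic_t(&foo->users);
 */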

#endif /* _LINUX_WAIT_H */