Add wait_on_atomic_t() and wake_up_atomic_t()
include/linux/wait.h
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H


#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
        unsigned int flags;
#define WQ_FLAG_EXCLUSIVE 0x01
        void *private;
        wait_queue_func_t func;
        struct list_head task_list;
};

struct wait_bit_key {
        void *flags;
        int bit_nr;
#define WAIT_ATOMIC_T_BIT_NR -1
};

struct wait_bit_queue {
        struct wait_bit_key key;
        wait_queue_t wait;
};

struct __wait_queue_head {
        spinlock_t lock;
        struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
        .private = tsk, \
        .func = default_wake_function, \
        .task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock), \
        .task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
        do { \
                static struct lock_class_key __key; \
                \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
        q->flags = 0;
        q->private = p;
        q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
                                        wait_queue_func_t func)
{
        q->flags = 0;
        q->private = NULL;
        q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
        return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
                                              wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
                                                   wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
                                       wait_queue_t *old)
{
        list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
                        void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define __wait_event(wq, condition) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
                if (condition) \
                        break; \
                schedule(); \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
        if (condition) \
                break; \
        __wait_event(wq, condition); \
} while (0)

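/*
 * Illustrative sketch (not part of this header): a minimal pairing of
 * wait_event() and wake_up().  The names my_wq, my_data_ready, my_consumer
 * and my_producer are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_data_ready;
 *
 *	static void my_consumer(void)
 *	{
 *		// sleeps in TASK_UNINTERRUPTIBLE until my_data_ready != 0
 *		wait_event(my_wq, my_data_ready);
 *	}
 *
 *	static void my_producer(void)
 *	{
 *		my_data_ready = 1;	// change the condition first ...
 *		wake_up(&my_wq);	// ... then wake the waiters
 *	}
 */
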
#define __wait_event_timeout(wq, condition, ret) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
                if (condition) \
                        break; \
                ret = schedule_timeout(ret); \
                if (!ret) \
                        break; \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!(condition)) \
                __wait_event_timeout(wq, condition, __ret); \
        __ret; \
})

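/*
 * Illustrative sketch (hypothetical names): handling the return value of
 * wait_event_timeout().  Zero means the timeout elapsed with the condition
 * still false; a positive value is the number of jiffies that remained.
 *
 *	long remaining;
 *
 *	remaining = wait_event_timeout(my_wq, my_data_ready, HZ);
 *	if (!remaining)
 *		pr_warn("my device: timed out waiting for data\n");
 */
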
#define __wait_event_interruptible(wq, condition, ret) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
                if (condition) \
                        break; \
                if (!signal_pending(current)) { \
                        schedule(); \
                        continue; \
                } \
                ret = -ERESTARTSYS; \
                break; \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __wait_event_interruptible(wq, condition, __ret); \
        __ret; \
})

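/*
 * Illustrative sketch (hypothetical names): a syscall-context waiter that
 * propagates -ERESTARTSYS when a signal interrupts the sleep.
 *
 *	int err;
 *
 *	err = wait_event_interruptible(my_wq, my_data_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: let the signal be handled
 *	// condition is true here
 */
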
#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
                if (condition) \
                        break; \
                if (!signal_pending(current)) { \
                        ret = schedule_timeout(ret); \
                        if (!ret) \
                                break; \
                        continue; \
                } \
                ret = -ERESTARTSYS; \
                break; \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
        long __ret = timeout; \
        if (!(condition)) \
                __wait_event_interruptible_timeout(wq, condition, __ret); \
        __ret; \
})

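/*
 * Illustrative sketch (hypothetical names): the three possible outcomes of
 * wait_event_interruptible_timeout().
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_data_ready,
 *						    msecs_to_jiffies(500));
 *	if (ret < 0)
 *		return ret;		// -ERESTARTSYS, interrupted by a signal
 *	else if (ret == 0)
 *		return -ETIMEDOUT;	// timeout elapsed, condition still false
 *	// ret > 0: condition became true with 'ret' jiffies to spare
 */
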
#define __wait_event_hrtimeout(wq, condition, timeout, state) \
({ \
        int __ret = 0; \
        DEFINE_WAIT(__wait); \
        struct hrtimer_sleeper __t; \
        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, \
                              HRTIMER_MODE_REL); \
        hrtimer_init_sleeper(&__t, current); \
        if ((timeout).tv64 != KTIME_MAX) \
                hrtimer_start_range_ns(&__t.timer, timeout, \
                                       current->timer_slack_ns, \
                                       HRTIMER_MODE_REL); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, state); \
                if (condition) \
                        break; \
                if (state == TASK_INTERRUPTIBLE && \
                    signal_pending(current)) { \
                        __ret = -ERESTARTSYS; \
                        break; \
                } \
                if (!__t.task) { \
                        __ret = -ETIME; \
                        break; \
                } \
                schedule(); \
        } \
        \
        hrtimer_cancel(&__t.timer); \
        destroy_hrtimer_on_stack(&__t.timer); \
        finish_wait(&wq, &__wait); \
        __ret; \
})

/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_UNINTERRUPTIBLE); \
        __ret; \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
({ \
        long __ret = 0; \
        if (!(condition)) \
                __ret = __wait_event_hrtimeout(wq, condition, timeout, \
                                               TASK_INTERRUPTIBLE); \
        __ret; \
})

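/*
 * Illustrative sketch (hypothetical names): waiting with a high-resolution
 * timeout expressed as a ktime_t rather than in jiffies.
 *
 *	int err = wait_event_interruptible_hrtimeout(my_wq, my_data_ready,
 *					ktime_set(0, 2 * NSEC_PER_MSEC));
 *	if (err == -ETIME)
 *		pr_debug("my device: 2ms budget exceeded\n");
 *	else if (err == -ERESTARTSYS)
 *		return err;
 */
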
#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait_exclusive(&wq, &__wait, \
                                        TASK_INTERRUPTIBLE); \
                if (condition) { \
                        finish_wait(&wq, &__wait); \
                        break; \
                } \
                if (!signal_pending(current)) { \
                        schedule(); \
                        continue; \
                } \
                ret = -ERESTARTSYS; \
                abort_exclusive_wait(&wq, &__wait, \
                                TASK_INTERRUPTIBLE, NULL); \
                break; \
        } \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __wait_event_interruptible_exclusive(wq, condition, __ret);\
        __ret; \
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
        int __ret = 0; \
        DEFINE_WAIT(__wait); \
        if (exclusive) \
                __wait.flags |= WQ_FLAG_EXCLUSIVE; \
        do { \
                if (likely(list_empty(&__wait.task_list))) \
                        __add_wait_queue_tail(&(wq), &__wait); \
                set_current_state(TASK_INTERRUPTIBLE); \
                if (signal_pending(current)) { \
                        __ret = -ERESTARTSYS; \
                        break; \
                } \
                if (irq) \
                        spin_unlock_irq(&(wq).lock); \
                else \
                        spin_unlock(&(wq).lock); \
                schedule(); \
                if (irq) \
                        spin_lock_irq(&(wq).lock); \
                else \
                        spin_lock(&(wq).lock); \
        } while (!(condition)); \
        __remove_wait_queue(&(wq), &__wait); \
        __set_current_state(TASK_RUNNING); \
        __ret; \
})


/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up while other processes are waiting
 * on the list, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up while other processes are waiting
 * on the list, the remaining processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
        ((condition) \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

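/*
 * Illustrative sketch (hypothetical names): a waiter and a waker that share
 * the wait queue head's own spinlock to protect the condition.
 *
 *	// waiter side:
 *	int err;
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, my_data_ready);
 *	if (!err)
 *		my_consume_data();	// still under my_wq.lock
 *	spin_unlock(&my_wq.lock);
 *
 *	// waker side:
 *	spin_lock(&my_wq.lock);
 *	my_data_ready = 1;
 *	wake_up_locked(&my_wq);
 *	spin_unlock(&my_wq.lock);
 */
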
#define __wait_event_killable(wq, condition, ret) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
                if (condition) \
                        break; \
                if (!fatal_signal_pending(current)) { \
                        schedule(); \
                        continue; \
                } \
                ret = -ERESTARTSYS; \
                break; \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
        int __ret = 0; \
        if (!(condition)) \
                __wait_event_killable(wq, condition, __ret); \
        __ret; \
})

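/*
 * Illustrative sketch (hypothetical names): like wait_event_interruptible(),
 * but only a fatal signal (e.g. SIGKILL) can break the sleep, which suits
 * I/O paths that should not be restarted by ordinary signals.
 *
 *	if (wait_event_killable(my_wq, my_io_done))
 *		return -ERESTARTSYS;	// fatal signal pending
 */
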

#define __wait_event_lock_irq(wq, condition, lock, cmd) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
                if (condition) \
                        break; \
                spin_unlock_irq(&lock); \
                cmd; \
                schedule(); \
                spin_lock_irq(&lock); \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition becomes true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
        if (condition) \
                break; \
        __wait_event_lock_irq(wq, condition, lock, ); \
} while (0)

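/*
 * Illustrative sketch (hypothetical names): waiting for space in a ring that
 * is protected by a driver spinlock taken with spin_lock_irq().  The macro
 * drops my_lock around schedule() and retakes it before rechecking.
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, my_ring_has_space(), my_lock);
 *	my_queue_request();		// my_lock held, space available
 *	spin_unlock_irq(&my_lock);
 */
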

#define __wait_event_interruptible_lock_irq(wq, condition, \
                                            lock, ret, cmd) \
do { \
        DEFINE_WAIT(__wait); \
        \
        for (;;) { \
                prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
                if (condition) \
                        break; \
                if (signal_pending(current)) { \
                        ret = -ERESTARTSYS; \
                        break; \
                } \
                spin_unlock_irq(&lock); \
                cmd; \
                schedule(); \
                spin_lock_irq(&lock); \
        } \
        finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
        int __ret = 0; \
        \
        if (!(condition)) \
                __wait_event_interruptible_lock_irq(wq, condition, \
                                                    lock, __ret, cmd); \
        __ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
        int __ret = 0; \
        \
        if (!(condition)) \
                __wait_event_interruptible_lock_irq(wq, condition, \
                                                    lock, __ret, ); \
        __ret; \
})


/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
                             signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
                                           signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
                        unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
        wait_queue_t name = { \
                .private = current, \
                .func = function, \
                .task_list = LIST_HEAD_INIT((name).task_list), \
        }

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

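/*
 * Illustrative sketch (hypothetical names): the open-coded waiting pattern
 * that the wait_event*() macros expand to, useful when the loop needs extra
 * work between the condition check and schedule().
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_data_ready)
 *			break;
 *		if (signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
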
#define DEFINE_WAIT_BIT(name, word, bit) \
        struct wait_bit_queue name = { \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
                .wait = { \
                        .private = current, \
                        .func = wake_bit_function, \
                        .task_list = \
                                LIST_HEAD_INIT((name).wait.task_list), \
                }, \
        }

#define init_wait(wait) \
        do { \
                (wait)->private = current; \
                (wait)->func = autoremove_wake_function; \
                INIT_LIST_HEAD(&(wait)->task_list); \
                (wait)->flags = 0; \
        } while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
                                int (*action)(void *), unsigned mode)
{
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}

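/*
 * Illustrative sketch (hypothetical names): waiting for a "busy" bit in a
 * driver flags word (unsigned long my_flags) to clear.  my_wait_sleep() is a
 * hypothetical @action callback for TASK_INTERRUPTIBLE mode.
 *
 *	static int my_wait_sleep(void *word)
 *	{
 *		if (signal_pending(current))
 *			return -EINTR;
 *		schedule();
 *		return 0;
 *	}
 *
 *	// clearer side: clear_bit(MY_FLAG_BUSY, &my_flags);
 *	// smp_mb__after_clear_bit(); wake_up_bit(&my_flags, MY_FLAG_BUSY);
 *	if (wait_on_bit(&my_flags, MY_FLAG_BUSY, my_wait_sleep,
 *			TASK_INTERRUPTIBLE))
 *		return -EINTR;
 */
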
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
                                int (*action)(void *), unsigned mode)
{
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
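/*
 * Illustrative sketch (hypothetical names): using a bit as a sleeping lock.
 * wait_on_bit_lock() returns with the bit set (lock owned); the owner clears
 * it and wakes the next waiter via wake_up_bit().  The @action here is a
 * my_wait_sleep()-style helper that simply calls schedule() and returns 0.
 *
 *	wait_on_bit_lock(&my_flags, MY_FLAG_LOCKED, my_wait_sleep,
 *			 TASK_UNINTERRUPTIBLE);
 *	// ... critical section, MY_FLAG_LOCKED is ours ...
 *	clear_bit_unlock(MY_FLAG_LOCKED, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, MY_FLAG_LOCKED);
 */
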

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}
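/*
 * Illustrative sketch (hypothetical names): waiting for an object's usage
 * count to drop to zero.  The @action callback here just schedules; the side
 * dropping the final reference issues wake_up_atomic_t() on the same address.
 *
 *	static int my_wait_atomic_t(atomic_t *p)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	// waiter:
 *	wait_on_atomic_t(&my_obj->users, my_wait_atomic_t,
 *			 TASK_UNINTERRUPTIBLE);
 *
 *	// last put:
 *	if (atomic_dec_and_test(&my_obj->users))
 *		wake_up_atomic_t(&my_obj->users);
 */
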

#endif