epoll keyed wakeups: add __wake_up_locked_key() and __wake_up_sync_key()
include/linux/wait.h
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

extern void init_waitqueue_head(wait_queue_head_t *q);

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
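
/*
 * A usage sketch (illustration only; wq_head, data_ready and
 * producer_side() are made-up names): waitqueue_active() lets a waker
 * skip the cost of __wake_up() when nobody is on the queue.  A memory
 * barrier is needed between updating the condition and checking for
 * waiters, otherwise a concurrent sleeper can be missed.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(wq_head);
 *	static int data_ready;
 *
 *	static void producer_side(void)
 *	{
 *		data_ready = 1;
 *		smp_mb();	// publish the condition before the check
 *		if (waitqueue_active(&wq_head))
 *			wake_up(&wq_head);
 *	}
 */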

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
							wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			int nr_exclusive, int sync, void *key);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

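/*
 * Keyed wakeups, the subject of this commit: __wake_up_locked_key() and
 * __wake_up_sync_key() pass @key through to every waiter's
 * wait_queue_func_t, so a callback can ignore wakeups it does not care
 * about (epoll uses the key to carry the raised poll events).  The
 * sketch below is illustrative only; my_whead, my_sleepers, my_wake_fn
 * and the use of POLLIN as the key are assumptions, not part of this
 * header:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_whead);	// woken with a key
 *	static DECLARE_WAIT_QUEUE_HEAD(my_sleepers);	// where tasks sleep
 *
 *	static int my_wake_fn(wait_queue_t *wait, unsigned mode,
 *			      int sync, void *key)
 *	{
 *		if (key && !((unsigned long)key & POLLIN))
 *			return 0;		// uninteresting event, skip
 *		wake_up(&my_sleepers);		// hand the wakeup on
 *		return 1;
 *	}
 *
 *	// waiter side: register a callback entry instead of a task
 *	wait_queue_t my_wait;
 *	init_waitqueue_func_entry(&my_wait, my_wake_fn);
 *	add_wait_queue(&my_whead, &my_wait);
 *
 *	// waker side: report the triggering events through the key
 *	__wake_up_sync_key(&my_whead, TASK_INTERRUPTIBLE, 1,
 *			   (void *)(unsigned long)POLLIN);
 */
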
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * macro to avoid include hell
 */
#define wake_up_nested(x, s)						\
do {									\
	unsigned long flags;						\
									\
	spin_lock_irqsave_nested(&(x)->lock, flags, (s));		\
	wake_up_locked(x);						\
	spin_unlock_irqrestore(&(x)->lock, flags);			\
} while (0)
#else
#define wake_up_nested(x, s)	wake_up(x)
#endif

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

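/*
 * A minimal wait_event()/wake_up() pairing, for illustration only (the
 * queue head, the flag and the two sides are made-up names):
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(work_wq);
 *	static int work_ready;
 *
 *	// consumer: sleeps in TASK_UNINTERRUPTIBLE until work_ready is set
 *	wait_event(work_wq, work_ready);
 *
 *	// producer: update the condition first, then wake the queue
 *	work_ready = 1;
 *	wake_up(&work_wq);
 */
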
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

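/*
 * Return-value handling for wait_event_timeout(), as a sketch (work_wq
 * and work_ready are the hypothetical names used above):
 *
 *	long left = wait_event_timeout(work_wq, work_ready, HZ);
 *	if (!left)
 *		pr_debug("timed out after one second\n");
 *	else
 *		pr_debug("condition met with %ld jiffies to spare\n", left);
 */
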
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

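/*
 * Callers of wait_event_interruptible() must handle the signal case,
 * usually by propagating -ERESTARTSYS back up the syscall path so the
 * signal can be delivered.  Sketch with the hypothetical names above:
 *
 *	int err = wait_event_interruptible(work_wq, work_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: interrupted by a signal
 *	// work_ready was true, carry on
 */
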
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait,			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})

#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})

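/*
 * wait_event_killable() suits long waits (a reply from a remote server,
 * say) that should ignore ordinary signals but still die on SIGKILL.
 * Sketch, hypothetical names:
 *
 *	int err = wait_event_killable(work_wq, work_ready);
 *	if (err)
 *		return err;	// fatal signal pending, bail out
 */
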
/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t *wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT(name)						\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= autoremove_wake_function,		\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)

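/*
 * DEFINE_WAIT()/prepare_to_wait()/finish_wait() let callers open-code
 * the loop that the wait_event*() macros expand to, which is useful
 * when the condition check needs extra work around the sleep.  The
 * sketch below is exactly the shape of __wait_event() above, reusing
 * the hypothetical work_wq/work_ready names:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&work_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (work_ready)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&work_wq, &wait);
 */
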
/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that wait for the bit to clear but
 * have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

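/*
 * The @action callback decides how the caller sleeps; the simplest one
 * just schedules.  Sketch of waiting for a flag bit to clear (my_flags,
 * my_bit_wait and the bit number are illustrative assumptions):
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();	// plain uninterruptible sleep
 *		return 0;	// 0 = keep waiting, nonzero aborts the wait
 *	}
 *
 *	// block until bit 0 of my_flags is cleared by someone else
 *	wait_on_bit(&my_flags, 0, my_bit_wait, TASK_UNINTERRUPTIBLE);
 */
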
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance when using a bitflag as
 * a lock. Use wait_on_bit_lock() in threads that wait for the bit
 * to clear with the intention of setting it, and clearing it again
 * when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

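/*
 * Together with wake_up_bit() this gives a bit-as-lock pattern.  Sketch
 * only, reusing the hypothetical my_flags/my_bit_wait from above:
 *
 *	// acquire: sleep until bit 0 was clear and we managed to set it
 *	wait_on_bit_lock(&my_flags, 0, my_bit_wait, TASK_UNINTERRUPTIBLE);
 *
 *	// ... critical section ...
 *
 *	// release: clear the bit, then wake anyone waiting on it
 *	clear_bit(0, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, 0);
 */
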
#endif /* __KERNEL__ */

#endif