include/linux/wait.h
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private = tsk, \
	.func = default_wake_function, \
	.task_list = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock = SPIN_LOCK_UNLOCKED, \
	.task_list = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

static inline void init_waitqueue_head(wait_queue_head_t *q)
{
	spin_lock_init(&q->lock);
	INIT_LIST_HEAD(&q->task_list);
}

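/*
 * Usage sketch (illustrative, not part of the original header): a wait
 * queue head can be declared statically with DECLARE_WAIT_QUEUE_HEAD()
 * or embedded in a structure and set up at run time with
 * init_waitqueue_head().  The names my_wq, my_dev and my_dev_init are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *
 *	struct my_dev {
 *		wait_queue_head_t readq;
 *	};
 *
 *	static void my_dev_init(struct my_dev *dev)
 *	{
 *		init_waitqueue_head(&dev->readq);
 *	}
 */
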
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/*
 * Used to distinguish between sync and async io wait context:
 * sync i/o typically specifies a NULL wait queue entry or a wait
 * queue entry bound to a task (current task) to wake up.
 * aio specifies a wait queue entry with an async notification
 * callback routine, not associated with any task.
 */
#define is_sync_wait(wait)	(!(wait) || ((wait)->private))

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
					wait_queue_t *old)
{
	list_del(&old->task_list);
}

void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
void FASTCALL(wake_up_bit(void *, int));
int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));

#define wake_up(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x),TASK_INTERRUPTIBLE, 1)

#define __wait_event(wq, condition) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
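
/*
 * Usage sketch (illustrative, not part of the original header): a
 * consumer sleeps until a producer sets a flag and wakes the queue.
 * my_wq is the hypothetical wait queue head declared above and
 * my_data_ready a hypothetical flag.
 *
 *	static int my_data_ready;
 *
 *	consumer:
 *		wait_event(my_wq, my_data_ready);
 *
 *	producer:
 *		my_data_ready = 1;
 *		wake_up(&my_wq);
 */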

#define __wait_event_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		ret = schedule_timeout(ret); \
		if (!ret) \
			break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})
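
/*
 * Usage sketch (illustrative): wait up to one second for the
 * hypothetical my_data_ready flag and distinguish the two outcomes.
 * handle_timeout() and handle_data() are hypothetical helpers.
 *
 *	long remaining = wait_event_timeout(my_wq, my_data_ready, HZ);
 *
 *	if (!remaining)
 *		handle_timeout();
 *	else
 *		handle_data(remaining);
 */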

#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
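
/*
 * Usage sketch (illustrative): the interruptible variant is the usual
 * choice in read()-style paths so that a signal can abort the wait.
 * my_wq and my_data_ready are the hypothetical names used above.
 *
 *	static int my_wait_for_data(void)
 *	{
 *		return wait_event_interruptible(my_wq, my_data_ready);
 *	}
 *
 * which returns 0 once my_data_ready became true, or -ERESTARTSYS if
 * the wait was interrupted by a signal.
 */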

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})
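
/*
 * Usage sketch (illustrative): a signal-aware wait bounded by a one
 * second timeout; the result distinguishes the three outcomes.
 * my_wq and my_data_ready are the hypothetical names used above.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_data_ready, HZ);
 *
 *	if (ret == -ERESTARTSYS)
 *		return ret;
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	handle_data(ret);
 *
 * handle_data() is the hypothetical helper from the sketch above; the
 * positive return value is the number of jiffies left on the timeout.
 */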

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret; \
})

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t * wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t * wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces during 2.7.
 */
extern void FASTCALL(sleep_on(wait_queue_head_t *q));
extern long FASTCALL(sleep_on_timeout(wait_queue_head_t *q,
				      signed long timeout));
extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
						    signed long timeout));

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
			      wait_queue_t *wait, int state));
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
					wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT(name) \
	wait_queue_t name = { \
		.private = current, \
		.func = autoremove_wake_function, \
		.task_list = LIST_HEAD_INIT((name).task_list), \
	}

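/*
 * Usage sketch (illustrative): DEFINE_WAIT() plus prepare_to_wait() /
 * finish_wait() is the open-coded form of the wait_event*() macros, for
 * callers that need extra work between checking the condition and
 * sleeping.  my_wq and my_data_ready are the hypothetical names used
 * above.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_data_ready || signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
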
#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait = { \
			.private = current, \
			.func = wake_bit_function, \
			.task_list = \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * Use wait_on_bit() in threads that need to wait for the bit to clear
 * but have no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}
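
/*
 * Usage sketch (illustrative): wait for bit 0 of a hypothetical
 * my_flags word to clear.  The @action callback decides how to sleep;
 * the trivial my_bit_wait() below just schedules and reports success.
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	wait_on_bit(&my_flags, 0, my_bit_wait, TASK_UNINTERRUPTIBLE);
 */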

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * Use wait_on_bit_lock() in threads that wait for a bit to clear with
 * the intention of setting it, and of clearing it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
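
/*
 * Usage sketch (illustrative): use bit 0 of the hypothetical my_flags
 * word as a simple lock, reusing my_bit_wait() from the sketch above.
 * The releasing side clears the bit and then wakes any bit waiters.
 *
 *	wait_on_bit_lock(&my_flags, 0, my_bit_wait, TASK_UNINTERRUPTIBLE);
 *	... critical section, bit 0 is now owned ...
 *	clear_bit(0, &my_flags);
 *	smp_mb__after_clear_bit();
 *	wake_up_bit(&my_flags, 0);
 */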

#endif /* __KERNEL__ */

#endif