/* SCHED: add some "wait..on_bit...timeout()" interfaces. */
/* deliverable/linux.git — include/linux/wait.h */
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4 * Linux wait queue related types and methods
5 */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11
12 typedef struct __wait_queue wait_queue_t;
13 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14 int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15
16 struct __wait_queue {
17 unsigned int flags;
18 #define WQ_FLAG_EXCLUSIVE 0x01
19 void *private;
20 wait_queue_func_t func;
21 struct list_head task_list;
22 };
23
24 struct wait_bit_key {
25 void *flags;
26 int bit_nr;
27 #define WAIT_ATOMIC_T_BIT_NR -1
28 unsigned long timeout;
29 };
30
31 struct wait_bit_queue {
32 struct wait_bit_key key;
33 wait_queue_t wait;
34 };
35
36 struct __wait_queue_head {
37 spinlock_t lock;
38 struct list_head task_list;
39 };
40 typedef struct __wait_queue_head wait_queue_head_t;
41
42 struct task_struct;
43
/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Key for waiting on bit @bit of the word at address @word. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

/* Key for atomic_t waiters (cf. out_of_line_wait_on_atomic_t()). */
#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
68
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Run-time initialisation of a wait queue head; the per-site static
 * lock_class_key gives each init site its own lockdep class.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads are initialised at run time so they get
 * a usable lock class; otherwise the static initialiser suffices.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
86
87 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
88 {
89 q->flags = 0;
90 q->private = p;
91 q->func = default_wake_function;
92 }
93
94 static inline void
95 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
96 {
97 q->flags = 0;
98 q->private = NULL;
99 q->func = func;
100 }
101
102 static inline int waitqueue_active(wait_queue_head_t *q)
103 {
104 return !list_empty(&q->task_list);
105 }
106
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

/* Lock-free list insert at the head; callers serialize externally
 * (cf. add_wait_queue() above for the locked version). */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}
115
116 /*
117 * Used for wake-one threads:
118 */
119 static inline void
120 __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
121 {
122 wait->flags |= WQ_FLAG_EXCLUSIVE;
123 __add_wait_queue(q, wait);
124 }
125
/* Lock-free list insert at the tail; callers serialize externally. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}
131
132 static inline void
133 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
134 {
135 wait->flags |= WQ_FLAG_EXCLUSIVE;
136 __add_wait_queue_tail(q, wait);
137 }
138
/* Unlink @old from @head; callers serialize externally. */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
144
/*
 * Out-of-line wake/wait primitives.  @mode is the task-state mask to
 * wake; @nr limits how many exclusive waiters are woken (0 wakes all —
 * see wake_up_all() below).
 */
typedef int wait_bit_action_f(struct wait_bit_key *);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
161
/* Wake TASK_NORMAL (interruptible and uninterruptible) sleepers. */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

/* Wake only TASK_INTERRUPTIBLE sleepers. */
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The poll mask @m is passed through as the wakeup key.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
184
/*
 * Condition wrapper for the timeout variants: __ret (in scope at the
 * expansion site) holds the remaining jiffies.  A true @condition with
 * an already-expired timeout is reported as 1 remaining jiffy, and the
 * wait terminates when the condition is true or the timeout has expired.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
192
/*
 * True when a sleep in @state can be woken by a signal.  A non-constant
 * @state must be assumed interruptible so the signal check in
 * ___wait_event() is not optimised away.
 *
 * Note: the stray line-continuation backslash after the closing paren
 * has been removed; it silently glued the following line onto this
 * macro definition.
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * @wq:        wait queue head to sleep on
 * @condition: expression re-checked after every wakeup
 * @state:     task state to sleep in (TASK_UNINTERRUPTIBLE etc.)
 * @exclusive: nonzero queues a wake-one (WQ_FLAG_EXCLUSIVE) waiter
 * @ret:       initial __ret value (timeout variants pass the timeout)
 * @cmd:       statement(s) that actually sleep (schedule() and friends)
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})
242
/* Uninterruptible sleep, no timeout; the ___wait_event() result is discarded. */
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
265
/*
 * Timeout variant: @timeout seeds __ret, schedule_timeout() keeps it
 * updated with the remaining jiffies, and ___wait_cond_timeout() folds
 * the final condition check into the return value.
 */
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to %true before
 * the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
295
/* As __wait_event() but runs @cmd1 before and @cmd2 after each schedule(). */
#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
320
/* Interruptible sleep; returns 0 or a -ERESTARTSYS-style signal code. */
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
347
/* Interruptible + timeout: see ___wait_cond_timeout() for the __ret protocol. */
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
 * a signal, or the remaining jiffies (at least 1) if the @condition
 * evaluated to %true before the @timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
379
/*
 * High-resolution timeout variant: an on-stack hrtimer_sleeper bounds
 * the sleep.  NOTE: __t.task going NULL is taken to mean the timer
 * fired, yielding -ETIME.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
429
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
454
/* Exclusive (wake-one) interruptible wait; 0 or the signal error code. */
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})
466
467
468 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
469 ({ \
470 int __ret = 0; \
471 DEFINE_WAIT(__wait); \
472 if (exclusive) \
473 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
474 do { \
475 if (likely(list_empty(&__wait.task_list))) \
476 __add_wait_queue_tail(&(wq), &__wait); \
477 set_current_state(TASK_INTERRUPTIBLE); \
478 if (signal_pending(current)) { \
479 __ret = -ERESTARTSYS; \
480 break; \
481 } \
482 if (irq) \
483 spin_unlock_irq(&(wq).lock); \
484 else \
485 spin_unlock(&(wq).lock); \
486 schedule(); \
487 if (irq) \
488 spin_lock_irq(&(wq).lock); \
489 else \
490 spin_lock(&(wq).lock); \
491 } while (!(condition)); \
492 __remove_wait_queue(&(wq), &__wait); \
493 __set_current_state(TASK_RUNNING); \
494 __ret; \
495 })
496
497
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
524
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
551
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is awoken, further exclusive waiters on the list are
 * not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
582
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if it is awoken, further exclusive waiters on the list are
 * not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
613
614
/* Killable sleep: only fatal signals interrupt the wait. */
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
640
641
/* Uninterruptible wait that drops irq-safe @lock around @cmd and schedule(). */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
705
706
/* Interruptible wait that drops irq-safe @lock around @cmd and schedule(). */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
778
/*
 * Interruptible + timeout wait that drops irq-safe @lock around
 * schedule_timeout().  Expands to a statement expression yielding the
 * remaining jiffies / -ERESTARTSYS; the bogus trailing semicolon has
 * been removed so the macro can be used safely in expression context
 * (e.g. the __ret assignment in the wrapper below).
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))
786
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
820
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

/* On-stack wait entry for the current task with a custom wake callback. */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

/* On-stack bit-wait entry keyed to bit @bit of the word at @word. */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
851
/* Re-initialise a wait entry for the current task (autoremove wakeup). */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)


/* Standard bit-wait actions, with and without key->timeout handling. */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
865
866 /**
867 * wait_on_bit - wait for a bit to be cleared
868 * @word: the word being waited on, a kernel virtual address
869 * @bit: the bit of the word being waited on
870 * @mode: the task state to sleep in
871 *
872 * There is a standard hashed waitqueue table for generic use. This
873 * is the part of the hashtable's accessor API that waits on a bit.
874 * For instance, if one were to have waiters on a bitflag, one would
875 * call wait_on_bit() in threads waiting for the bit to clear.
876 * One uses wait_on_bit() where one is waiting for the bit to clear,
877 * but has no intention of setting it.
878 * Returned value will be zero if the bit was cleared, or non-zero
879 * if the process received a signal and the mode permitted wakeup
880 * on that signal.
881 */
882 static inline int
883 wait_on_bit(void *word, int bit, unsigned mode)
884 {
885 if (!test_bit(bit, word))
886 return 0;
887 return out_of_line_wait_on_bit(word, bit,
888 bit_wait,
889 mode);
890 }
891
892 /**
893 * wait_on_bit_io - wait for a bit to be cleared
894 * @word: the word being waited on, a kernel virtual address
895 * @bit: the bit of the word being waited on
896 * @mode: the task state to sleep in
897 *
898 * Use the standard hashed waitqueue table to wait for a bit
899 * to be cleared. This is similar to wait_on_bit(), but calls
900 * io_schedule() instead of schedule() for the actual waiting.
901 *
902 * Returned value will be zero if the bit was cleared, or non-zero
903 * if the process received a signal and the mode permitted wakeup
904 * on that signal.
905 */
906 static inline int
907 wait_on_bit_io(void *word, int bit, unsigned mode)
908 {
909 if (!test_bit(bit, word))
910 return 0;
911 return out_of_line_wait_on_bit(word, bit,
912 bit_wait_io,
913 mode);
914 }
915
916 /**
917 * wait_on_bit_action - wait for a bit to be cleared
918 * @word: the word being waited on, a kernel virtual address
919 * @bit: the bit of the word being waited on
920 * @action: the function used to sleep, which may take special actions
921 * @mode: the task state to sleep in
922 *
923 * Use the standard hashed waitqueue table to wait for a bit
924 * to be cleared, and allow the waiting action to be specified.
925 * This is like wait_on_bit() but allows fine control of how the waiting
926 * is done.
927 *
928 * Returned value will be zero if the bit was cleared, or non-zero
929 * if the process received a signal and the mode permitted wakeup
930 * on that signal.
931 */
932 static inline int
933 wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
934 {
935 if (!test_bit(bit, word))
936 return 0;
937 return out_of_line_wait_on_bit(word, bit, action, mode);
938 }
939
940 /**
941 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
942 * @word: the word being waited on, a kernel virtual address
943 * @bit: the bit of the word being waited on
944 * @mode: the task state to sleep in
945 *
946 * There is a standard hashed waitqueue table for generic use. This
947 * is the part of the hashtable's accessor API that waits on a bit
948 * when one intends to set it, for instance, trying to lock bitflags.
949 * For instance, if one were to have waiters trying to set bitflag
950 * and waiting for it to clear before setting it, one would call
951 * wait_on_bit() in threads waiting to be able to set the bit.
952 * One uses wait_on_bit_lock() where one is waiting for the bit to
953 * clear with the intention of setting it, and when done, clearing it.
954 *
955 * Returns zero if the bit was (eventually) found to be clear and was
956 * set. Returns non-zero if a signal was delivered to the process and
957 * the @mode allows that signal to wake the process.
958 */
959 static inline int
960 wait_on_bit_lock(void *word, int bit, unsigned mode)
961 {
962 if (!test_and_set_bit(bit, word))
963 return 0;
964 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
965 }
966
967 /**
968 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
969 * @word: the word being waited on, a kernel virtual address
970 * @bit: the bit of the word being waited on
971 * @mode: the task state to sleep in
972 *
973 * Use the standard hashed waitqueue table to wait for a bit
974 * to be cleared and then to atomically set it. This is similar
975 * to wait_on_bit(), but calls io_schedule() instead of schedule()
976 * for the actual waiting.
977 *
978 * Returns zero if the bit was (eventually) found to be clear and was
979 * set. Returns non-zero if a signal was delivered to the process and
980 * the @mode allows that signal to wake the process.
981 */
982 static inline int
983 wait_on_bit_lock_io(void *word, int bit, unsigned mode)
984 {
985 if (!test_and_set_bit(bit, word))
986 return 0;
987 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
988 }
989
990 /**
991 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
992 * @word: the word being waited on, a kernel virtual address
993 * @bit: the bit of the word being waited on
994 * @action: the function used to sleep, which may take special actions
995 * @mode: the task state to sleep in
996 *
997 * Use the standard hashed waitqueue table to wait for a bit
998 * to be cleared and then to set it, and allow the waiting action
999 * to be specified.
1000 * This is like wait_on_bit() but allows fine control of how the waiting
1001 * is done.
1002 *
1003 * Returns zero if the bit was (eventually) found to be clear and was
1004 * set. Returns non-zero if a signal was delivered to the process and
1005 * the @mode allows that signal to wake the process.
1006 */
1007 static inline int
1008 wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
1009 {
1010 if (!test_and_set_bit(bit, word))
1011 return 0;
1012 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1013 }
1014
1015 /**
1016 * wait_on_atomic_t - Wait for an atomic_t to become 0
1017 * @val: The atomic value being waited on, a kernel virtual address
1018 * @action: the function used to sleep, which may take special actions
1019 * @mode: the task state to sleep in
1020 *
1021 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
1022 * the purpose of getting a waitqueue, but we set the key to a bit number
1023 * outside of the target 'word'.
1024 */
1025 static inline
1026 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1027 {
1028 if (atomic_read(val) == 0)
1029 return 0;
1030 return out_of_line_wait_on_atomic_t(val, action, mode);
1031 }
1032
1033 #endif /* _LINUX_WAIT_H */