Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1#ifndef _LINUX_WAIT_H
2#define _LINUX_WAIT_H
3/*
4 * Linux wait queue related types and methods
5 */
6#include <linux/list.h>
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <asm/current.h>
10#include <uapi/linux/wait.h>
11
/* A wait queue entry: one waiter (or wake callback) queued on a head. */
typedef struct __wait_queue wait_queue_t;
/*
 * Per-entry wake callback, invoked by the __wake_up*() primitives.
 * @mode:  set of task states eligible for wakeup (e.g. TASK_NORMAL)
 * @flags: wakeup flags passed down from the waker
 * @key:   opaque cookie handed to __wake_up() and friends (e.g. a poll mask)
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* exclusive (wake-one) waiter, see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN		0x02	/* used by wait_woken()/woken_wake_function() */

struct __wait_queue {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* normally the waiting task, see init_waitqueue_entry() */
	wait_queue_func_t	func;		/* called on wakeup */
	struct list_head	task_list;	/* link on __wait_queue_head::task_list */
};
26
/*
 * Key identifying one bit within one word; used by the wait_on_bit()
 * machinery to match sleeping bit-waiters against wake_up_bit() calls.
 */
struct wait_bit_key {
	void		*flags;		/* the word containing the awaited bit */
	int		bit_nr;		/* bit number, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1	/* sentinel: waiting on an atomic_t, not a bit */
	unsigned long	timeout;	/* used by the _timeout bit-wait variants */
};

/* A wait queue entry bundled with the bit it is waiting for. */
struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

/* Wait queue head: @lock protects @task_list, the list of queued entries. */
struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
46
/*
 * Macros for declaration and initialisation of the datatypes
 */

/* Static initializer for a wait queue entry owned by task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait queue head: unlocked, empty (self-linked) list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name)					\
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Initializers for struct wait_bit_key (bit-in-word and atomic_t flavours). */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
71
extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Runtime initializer for a wait queue head.  The static lock_class_key
 * gives each call site its own key (used for lock classification, e.g.
 * by lockdep); #q passes the variable's name for debugging.
 */
#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads are initialized at runtime via
 * init_waitqueue_head() rather than with the static initializer.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
89
/* Initialize @q as a wait entry for task @p using the default wake function. */
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

/*
 * Initialize @q as a callback-only entry: no task is attached
 * (private = NULL) and @func is invoked on wakeup instead.
 */
static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}
104
105static inline int waitqueue_active(wait_queue_head_t *q)
106{
107 return !list_empty(&q->task_list);
108}
109
/* Locking variants: these acquire q->lock themselves. */
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

/* Lockless helper: link @new at the head of @head's list. */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

/* Lockless helper: link @new at the tail of @head's list. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

/* Mark @wait exclusive (wake-one) and queue it at the tail. */
static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/* Lockless helper: unlink @old from its wait queue. */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}
147
/* Action run while waiting for a bit; nonzero return aborts the wait. */
typedef int wait_bit_action_f(struct wait_bit_key *);

/* Core wakeup primitives; @nr exclusive waiters are woken (0 = all, see wake_up_all()). */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);

/* Bit-wait and atomic_t-wait primitives (keyed by struct wait_bit_key). */
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);
164
/* nr_exclusive == 1 wakes one exclusive waiter; 0 wakes them all. */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * @m is passed through to each waiter's wake function as the @key
 * cookie (by the names, presumably a poll event mask).
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
187
/*
 * Condition wrapper for the *_timeout variants.  Expands inside a scope
 * where __ret holds the remaining timeout.  If @condition just became
 * true with no time left, force __ret to 1 so callers can distinguish
 * "condition true" (>= 1) from "timed out" (0).  The whole expression
 * is true when waiting should stop: condition true, or time exhausted.
 */
#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
195
/*
 * True if sleeping in @state can be interrupted by a signal.  A
 * non-compile-time-constant @state must conservatively be assumed
 * interruptible.
 *
 * Note: the definition previously ended with a stray backslash-newline,
 * which silently spliced the following source line into the macro; the
 * continuation has been removed so the macro ends where it appears to.
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * Flow: each iteration, prepare_to_wait_event() (re)queues __wait and
 * sets the task state; if @condition is then true we stop, otherwise
 * @cmd (typically schedule()) sleeps.  If @state is interruptible and
 * prepare_to_wait_event() reported a pending signal (__int != 0), the
 * wait aborts with that value; exclusive waiters take the
 * abort_exclusive_wait() path so a concurrent wakeup aimed at them is
 * not lost, and skip finish_wait() via the __out label.
 */
#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})
245
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * @condition may be evaluated an arbitrary number of times, so it
 * should be free of side effects.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)
269
#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 * (uses io_schedule() in place of schedule() while sleeping)
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)
284
#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 * try_to_freeze() is called after each wakeup so the freezer can make
 * progress while we wait.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})
309
#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})
342
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 * Return values match wait_event_timeout(); additionally -ERESTARTSYS
 * if interrupted by a signal (via ___wait_event()'s interruptible path).
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})
360
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)
398
#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})
426
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})
461
/*
 * hrtimer-based wait: an on-stack sleeper timer is armed (unless
 * @timeout is KTIME_MAX, meaning "wait forever") and the wait aborts
 * with -ETIME once the sleeper's task pointer has been cleared.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the @timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})
512
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})
538
#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

/*
 * Like wait_event_interruptible(), but the waiter is queued with
 * WQ_FLAG_EXCLUSIVE (wake-one semantics).
 * Returns 0 if @condition became true, -ERESTARTSYS on signal.
 */
#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_freezable_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule(); try_to_freeze())

/*
 * Exclusive variant of wait_event_freezable(): freezable, interruptible,
 * queued with WQ_FLAG_EXCLUSIVE.
 */
#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})
565
566
/*
 * Common implementation for the *_locked waits below.  Called with
 * (wq).lock held; the lock is dropped around schedule() and re-taken
 * before @condition is re-tested, so the condition is always evaluated
 * under the lock.  @exclusive queues the waiter with WQ_FLAG_EXCLUSIVE;
 * @irq selects spin_{un,}lock_irq() over plain spin_{un,}lock().
 * Returns 0 when @condition became true, -ERESTARTSYS on a signal.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})
595
596
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
650
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set; so when multiple processes are waiting on the list and this
 * process is woken, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set; so when multiple processes are waiting on the list and this
 * process is woken, further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
712
713
#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})
740
741
/* @lock is dropped around @cmd and schedule(), re-taken before re-testing. */
#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)
805
806
/* Interruptible variant of __wait_event_lock_irq(); see above. */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})
878
/*
 * Timed, interruptible, lock-dropping wait.  Note: the definition
 * deliberately has no trailing semicolon -- it expands to an
 * expression assigned to __ret in the wrapper below; the stray
 * semicolon previously present could also break use of this macro
 * after an un-braced if/else.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})
920
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time.
 *
 * prepare_to_wait*() enqueue the entry and set the task state;
 * finish_wait() restores TASK_RUNNING and dequeues the entry.
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
/* NOTE(review): presumably returns 0 or -ERESTARTSYS on pending signal -- see kernel/sched/wait.c */
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
/* Wake functions installed in wait_queue_t::func by the helpers below. */
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
933
/*
 * DEFINE_WAIT_FUNC - declare an on-stack wait queue entry for 'current'
 * with a caller-supplied wake function.  The list head is
 * self-initialised so the entry can be handed straight to
 * prepare_to_wait().
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

/* Common case: the entry removes itself from the queue on wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
942
/*
 * DEFINE_WAIT_BIT - declare an on-stack wait_bit_queue for 'current',
 * keyed on @bit of @word and woken through wake_bit_function().
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
953
/*
 * init_wait - (re)initialise a wait queue entry at run time.
 * Unlike DEFINE_WAIT(), this also clears ->flags (e.g. any
 * WQ_FLAG_EXCLUSIVE / WQ_FLAG_WOKEN left over from a previous wait).
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
961
962
/*
 * Stock 'action' functions for the wait_on_bit*() family below.  The
 * *_io variants sleep accounted as I/O wait; the *_timeout variants
 * honour wait_bit_key::timeout.
 */
extern int bit_wait(struct wait_bit_key *);
extern int bit_wait_io(struct wait_bit_key *);
extern int bit_wait_timeout(struct wait_bit_key *);
extern int bit_wait_io_timeout(struct wait_bit_key *);
967
968/**
969 * wait_on_bit - wait for a bit to be cleared
970 * @word: the word being waited on, a kernel virtual address
971 * @bit: the bit of the word being waited on
972 * @mode: the task state to sleep in
973 *
974 * There is a standard hashed waitqueue table for generic use. This
975 * is the part of the hashtable's accessor API that waits on a bit.
976 * For instance, if one were to have waiters on a bitflag, one would
977 * call wait_on_bit() in threads waiting for the bit to clear.
978 * One uses wait_on_bit() where one is waiting for the bit to clear,
979 * but has no intention of setting it.
980 * Returned value will be zero if the bit was cleared, or non-zero
981 * if the process received a signal and the mode permitted wakeup
982 * on that signal.
983 */
984static inline int
985wait_on_bit(unsigned long *word, int bit, unsigned mode)
986{
987 might_sleep();
988 if (!test_bit(bit, word))
989 return 0;
990 return out_of_line_wait_on_bit(word, bit,
991 bit_wait,
992 mode);
993}
994
995/**
996 * wait_on_bit_io - wait for a bit to be cleared
997 * @word: the word being waited on, a kernel virtual address
998 * @bit: the bit of the word being waited on
999 * @mode: the task state to sleep in
1000 *
1001 * Use the standard hashed waitqueue table to wait for a bit
1002 * to be cleared. This is similar to wait_on_bit(), but calls
1003 * io_schedule() instead of schedule() for the actual waiting.
1004 *
1005 * Returned value will be zero if the bit was cleared, or non-zero
1006 * if the process received a signal and the mode permitted wakeup
1007 * on that signal.
1008 */
1009static inline int
1010wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1011{
1012 might_sleep();
1013 if (!test_bit(bit, word))
1014 return 0;
1015 return out_of_line_wait_on_bit(word, bit,
1016 bit_wait_io,
1017 mode);
1018}
1019
1020/**
1021 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1022 * @word: the word being waited on, a kernel virtual address
1023 * @bit: the bit of the word being waited on
1024 * @mode: the task state to sleep in
1025 * @timeout: timeout, in jiffies
1026 *
1027 * Use the standard hashed waitqueue table to wait for a bit
1028 * to be cleared. This is similar to wait_on_bit(), except also takes a
1029 * timeout parameter.
1030 *
1031 * Returned value will be zero if the bit was cleared before the
1032 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1033 * received a signal and the mode permitted wakeup on that signal.
1034 */
1035static inline int
1036wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1037 unsigned long timeout)
1038{
1039 might_sleep();
1040 if (!test_bit(bit, word))
1041 return 0;
1042 return out_of_line_wait_on_bit_timeout(word, bit,
1043 bit_wait_timeout,
1044 mode, timeout);
1045}
1046
1047/**
1048 * wait_on_bit_action - wait for a bit to be cleared
1049 * @word: the word being waited on, a kernel virtual address
1050 * @bit: the bit of the word being waited on
1051 * @action: the function used to sleep, which may take special actions
1052 * @mode: the task state to sleep in
1053 *
1054 * Use the standard hashed waitqueue table to wait for a bit
1055 * to be cleared, and allow the waiting action to be specified.
1056 * This is like wait_on_bit() but allows fine control of how the waiting
1057 * is done.
1058 *
1059 * Returned value will be zero if the bit was cleared, or non-zero
1060 * if the process received a signal and the mode permitted wakeup
1061 * on that signal.
1062 */
1063static inline int
1064wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1065 unsigned mode)
1066{
1067 might_sleep();
1068 if (!test_bit(bit, word))
1069 return 0;
1070 return out_of_line_wait_on_bit(word, bit, action, mode);
1071}
1072
1073/**
1074 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1075 * @word: the word being waited on, a kernel virtual address
1076 * @bit: the bit of the word being waited on
1077 * @mode: the task state to sleep in
1078 *
1079 * There is a standard hashed waitqueue table for generic use. This
1080 * is the part of the hashtable's accessor API that waits on a bit
1081 * when one intends to set it, for instance, trying to lock bitflags.
1082 * For instance, if one were to have waiters trying to set bitflag
1083 * and waiting for it to clear before setting it, one would call
1084 * wait_on_bit() in threads waiting to be able to set the bit.
1085 * One uses wait_on_bit_lock() where one is waiting for the bit to
1086 * clear with the intention of setting it, and when done, clearing it.
1087 *
1088 * Returns zero if the bit was (eventually) found to be clear and was
1089 * set. Returns non-zero if a signal was delivered to the process and
1090 * the @mode allows that signal to wake the process.
1091 */
1092static inline int
1093wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1094{
1095 might_sleep();
1096 if (!test_and_set_bit(bit, word))
1097 return 0;
1098 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1099}
1100
1101/**
1102 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1103 * @word: the word being waited on, a kernel virtual address
1104 * @bit: the bit of the word being waited on
1105 * @mode: the task state to sleep in
1106 *
1107 * Use the standard hashed waitqueue table to wait for a bit
1108 * to be cleared and then to atomically set it. This is similar
1109 * to wait_on_bit(), but calls io_schedule() instead of schedule()
1110 * for the actual waiting.
1111 *
1112 * Returns zero if the bit was (eventually) found to be clear and was
1113 * set. Returns non-zero if a signal was delivered to the process and
1114 * the @mode allows that signal to wake the process.
1115 */
1116static inline int
1117wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1118{
1119 might_sleep();
1120 if (!test_and_set_bit(bit, word))
1121 return 0;
1122 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1123}
1124
1125/**
1126 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1127 * @word: the word being waited on, a kernel virtual address
1128 * @bit: the bit of the word being waited on
1129 * @action: the function used to sleep, which may take special actions
1130 * @mode: the task state to sleep in
1131 *
1132 * Use the standard hashed waitqueue table to wait for a bit
1133 * to be cleared and then to set it, and allow the waiting action
1134 * to be specified.
1135 * This is like wait_on_bit() but allows fine control of how the waiting
1136 * is done.
1137 *
1138 * Returns zero if the bit was (eventually) found to be clear and was
1139 * set. Returns non-zero if a signal was delivered to the process and
1140 * the @mode allows that signal to wake the process.
1141 */
1142static inline int
1143wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1144 unsigned mode)
1145{
1146 might_sleep();
1147 if (!test_and_set_bit(bit, word))
1148 return 0;
1149 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1150}
1151
1152/**
1153 * wait_on_atomic_t - Wait for an atomic_t to become 0
1154 * @val: The atomic value being waited on, a kernel virtual address
1155 * @action: the function used to sleep, which may take special actions
1156 * @mode: the task state to sleep in
1157 *
1158 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
1159 * the purpose of getting a waitqueue, but we set the key to a bit number
1160 * outside of the target 'word'.
1161 */
1162static inline
1163int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1164{
1165 might_sleep();
1166 if (atomic_read(val) == 0)
1167 return 0;
1168 return out_of_line_wait_on_atomic_t(val, action, mode);
1169}
1170
1171#endif /* _LINUX_WAIT_H */