Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _LINUX_WAIT_H
3#define _LINUX_WAIT_H
4/*
5 * Linux wait queue related types and methods
6 */
7#include <linux/list.h>
8#include <linux/stddef.h>
9#include <linux/spinlock.h>
10
11#include <asm/current.h>
12#include <uapi/linux/wait.h>
13
typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Per-entry wakeup callback; the default is default_wake_function().
 * @key is the opaque pointer passed through from the __wake_up*() caller.
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* wake-one semantics: counts against the @nr limit of __wake_up() */
#define WQ_FLAG_WOKEN		0x02	/* entry has been woken -- NOTE(review): not consumed in this header; confirm against wait_woken() users */
#define WQ_FLAG_BOOKMARK	0x04	/* marker entry; see __wake_up_locked_key_bookmark() */
23
/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits above */
	void			*private;	/* usually the waiting task_struct (see init_waitqueue_entry()) */
	wait_queue_func_t	func;		/* wakeup callback invoked when the queue is woken */
	struct list_head	entry;		/* linkage into wait_queue_head::head */
};
33
struct wait_queue_head {
	spinlock_t		lock;	/* protects ->head and the queued entries */
	struct list_head	head;	/* list of wait_queue_entry::entry */
};
typedef struct wait_queue_head wait_queue_head_t;	/* legacy alias */
39
40struct task_struct;
41
42/*
 * Macros for declaration and initialisation of the datatypes
44 */
45
/* Static initializer for a wait_queue_entry waking task @tsk; list links start NULL. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.entry		= { NULL, NULL } }

/* Define and initialize a wait_queue_entry for task @tsk. */
#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
53
/* Static initializer: unlocked lock and an empty (self-linked) list head. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.head		= { &(name).head, &(name).head } }

/* Define and statically initialize a wait_queue_head. */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
60
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Run-time initialization of a wait_queue_head.  The function-local static
 * __key gives every invocation site its own lockdep class, and the
 * stringified argument provides the lockdep name.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)
69
#ifdef CONFIG_LOCKDEP
/* On-stack heads must be initialized at run time so lockdep gets a key. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
78
79static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
80{
81 wq_entry->flags = 0;
82 wq_entry->private = p;
83 wq_entry->func = default_wake_function;
84}
85
86static inline void
87init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
88{
89 wq_entry->flags = 0;
90 wq_entry->private = NULL;
91 wq_entry->func = func;
92}
93
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * Returns true if the wait list is not empty.
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like::
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                   prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                       // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq_head))          if (@cond)
 *        wake_up(wq_head);                       break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	return !list_empty(&wq_head->head);
}
128
/**
 * wq_has_single_sleeper - check if there is only one sleeper
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has exactly one sleeper on the list.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
{
	return list_is_singular(&wq_head->head);
}
141
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes.
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side (e.g. the one implied by set_current_state()).
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
162
/* Out-of-line, locking add/remove helpers (these take wq_head->lock themselves). */
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
166
/* Add @wq_entry at the head of the queue; caller must hold wq_head->lock. */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->entry, &wq_head->head);
}
171
/*
 * Used for wake-one threads: marks the entry WQ_FLAG_EXCLUSIVE and queues it
 * at the head.  Caller must hold wq_head->lock.
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}
181
/* Add @wq_entry at the tail of the queue; caller must hold wq_head->lock. */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->entry, &wq_head->head);
}
186
/* Exclusive (wake-one) variant of __add_wait_queue_entry_tail(); lock held by caller. */
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}
193
/* Unlink @wq_entry from the queue; caller must hold wq_head->lock. */
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->entry);
}
199
/*
 * Out-of-line wakeup primitives.  @mode is the task-state mask to wake
 * (e.g. TASK_NORMAL, TASK_INTERRUPTIBLE); @nr limits how many exclusive
 * waiters are woken (0 == all); @key is handed to each entry's ->func.
 * The _locked variants require wq_head->lock to already be held.
 */
void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
		unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
208
/*
 * Convenience wrappers: plain forms wake one exclusive waiter, _nr wakes up
 * to @nr, _all wakes every exclusive waiter (nr == 0), _locked forms assume
 * the caller already holds wq_head->lock.
 */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

/* Interruptible variants only wake tasks sleeping in TASK_INTERRUPTIBLE. */
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE)
219
/*
 * Wakeup macros to be used to report events to the targets.
 * poll_to_key()/key_to_poll() convert a poll event mask to/from the opaque
 * @key pointer handed to the entries' wakeup callbacks.
 */
#define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m))
#define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m))
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, poll_to_key(m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define wake_up_interruptible_sync_poll_locked(x, m)				\
	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
235
/*
 * Condition wrapper for the *_timeout variants.  Expects a __ret variable
 * (the remaining jiffies) in scope at the expansion site: if the condition
 * turns true while __ret has hit 0, force __ret to 1 so "condition met" is
 * distinguishable from "timed out".  The expression is true (stop waiting)
 * when the condition holds or the timeout has expired.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
243
/* True if @state can be woken by signals; non-constant states are assumed interruptible. */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)			\

extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);
249
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * ___wait_event() is the core wait loop: prepare, re-check the condition,
 * bail out on a pending signal (for interruptible states), otherwise run
 * @cmd (typically schedule()) and go around again.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
285
/* Uninterruptible, non-exclusive wait; the (always zero) result is discarded. */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())
289
/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)	/* fast path: skip the wait-queue round-trip */		\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
309
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule(), so the sleep
 * is accounted as I/O wait.
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
324
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    freezable_schedule())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns 0 if @condition became true, or a negative error if the wait was
 * interrupted by a signal.
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
349
/* schedule_timeout() keeps the remaining jiffies in __ret between iterations. */
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})
382
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = freezable_schedule_timeout(__ret))

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.  May also return a negative error if
 * interrupted by a signal.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
400
/* @cmd1 runs before each schedule(), @cmd2 after; entry is queued exclusively. */
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)
413
#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command executed before each sleep
 * @cmd2: the command executed after each sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
438
/* Interruptible, non-exclusive wait; yields 0 or a -ERESTARTSYS-style error. */
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})
466
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
501
/*
 * Core high-resolution timeout wait: arm an on-stack hrtimer sleeper (unless
 * @timeout is KTIME_MAX, which means wait forever) and abort the loop with
 * -ETIME once the sleeper has fired (its ->task is then NULL).
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC,			\
				      HRTIMER_MODE_REL);			\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})
525
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses; signals are
 * not taken into account.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
551
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t (KTIME_MAX means wait indefinitely)
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})
577
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/* Like wait_event_interruptible() but queued exclusively (wake-one). */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})
590
#define __wait_event_killable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,			\
		      schedule())

/* Like wait_event_killable() but queued exclusively (wake-one). */
#define wait_event_killable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable_exclusive(wq, condition);		\
	__ret;									\
})
603
604
#define __wait_event_freezable_exclusive(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
			freezable_schedule())

/* Like wait_event_freezable() but queued exclusively (wake-one). */
#define wait_event_freezable_exclusive(wq, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable_exclusive(wq, condition);	\
	__ret;									\
})
617
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, schedule()); \
} while (0)
637
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 */
#define wait_event_idle_exclusive(wq_head, condition)				\
do {										\
	might_sleep();								\
	if (!(condition))							\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, schedule()); \
} while (0)
661
/* Timeout variant of wait_event_idle(); __ret tracks the remaining jiffies. */
#define __wait_event_idle_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 0, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)			\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_timeout(wq_head, condition, timeout); \
	__ret;									\
})
694
/* Exclusive (wake-one) flavour of __wait_event_idle_timeout(). */
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_IDLE, 1, timeout,					\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_idle_exclusive_timeout(wq_head, condition, timeout);\
	__ret;									\
})
731
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);

/*
 * Wait loop for the *_locked() family: the caller holds wq.lock on entry
 * and on exit.  @fn (do_wait_intr or do_wait_intr_irq) performs one sleep
 * cycle and returns non-zero to abort the wait.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)		\
({										\
	int __ret;								\
	DEFINE_WAIT(__wait);							\
	if (exclusive)								\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;				\
	do {									\
		__ret = fn(&(wq), &__wait);					\
		if (__ret)							\
			break;							\
	} while (!(condition));							\
	__remove_wait_queue(&(wq), &__wait);					\
	__set_current_state(TASK_RUNNING);					\
	__ret;									\
})
750
751
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * Must be called with wq.lock held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)				\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
778
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * Must be called with wq.lock held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)			\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
805
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * Must be called with wq.lock held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this process
 * is woken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
836
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * Must be called with wq.lock held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this process
 * is woken further processes are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)		\
	((condition)								\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
867
868
#define __wait_event_killable(wq, condition)					\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * kill signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)					\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_killable(wq_head, condition);		\
	__ret;									\
})
895
/*
 * Timed TASK_KILLABLE sleep: __ret holds the remaining jiffies and is
 * threaded through schedule_timeout() on each iteration;
 * ___wait_cond_timeout() folds the timeout state into the condition.
 */
#define __wait_event_killable_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      TASK_KILLABLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))
900
/**
 * wait_event_killable_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a kill signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a kill signal.
 *
 * Only kill signals interrupt this process.
 */
#define wait_event_killable_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_killable_timeout(wq_head,		\
						condition, timeout);	\
	__ret;								\
})
932
933
/*
 * Sleep in TASK_UNINTERRUPTIBLE with @lock held on entry/exit: @lock is
 * dropped (and @cmd run) around each schedule() and reacquired before
 * @condition is rechecked.  @cmd may be empty.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))
940
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq_head, condition, lock, cmd);		\
} while (0)
970
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	/* trailing comma passes an intentionally empty cmd argument */	\
	__wait_event_lock_irq(wq_head, condition, lock, );		\
} while (0)
997
998
/*
 * Like __wait_event_lock_irq() but sleeps in TASK_INTERRUPTIBLE, so the
 * wait can be interrupted by a signal (returning -ERESTARTSYS via
 * ___wait_event()).  @cmd may be empty.
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,	\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))
1005
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock, cmd);	\
	__ret;								\
})
1039
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)	\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		/* trailing comma: intentionally empty cmd argument */	\
		__ret = __wait_event_interruptible_lock_irq(wq_head,	\
						condition, lock,);	\
	__ret;								\
})
1070
/*
 * Timed wait with @lock held on entry/exit: @lock is dropped around the
 * schedule_timeout() call and reacquired before @condition is rechecked.
 * @state selects TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE; __ret
 * carries the remaining jiffies across iterations.
 *
 * Note: no trailing semicolon here — callers terminate the statement
 * themselves, matching the other __wait_event_*() helpers.  (The old
 * trailing ';' emitted a stray empty statement in every expansion.)
 */
#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
	___wait_event(wq_head, ___wait_cond_timeout(condition),		\
		      state, 0, timeout,				\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))
1077
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, or the remaining jiffies (at least 1)
 * if the @condition evaluated to true before the @timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_INTERRUPTIBLE);		\
	__ret;								\
})
1112
/**
 * wait_event_lock_irq_timeout - sleep until a condition gets true or a
 *		timeout elapses. The condition is checked under the lock.
 *		This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * Like wait_event_interruptible_lock_irq_timeout(), but sleeps in
 * TASK_UNINTERRUPTIBLE so signals do not interrupt the wait.
 *
 * The function returns 0 if the @timeout elapsed, or the remaining
 * jiffies (at least 1) if the @condition evaluated to true before the
 * @timeout elapsed.
 */
#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_lock_irq_timeout(			\
					wq_head, condition, lock, timeout, \
					TASK_UNINTERRUPTIBLE);		\
	__ret;								\
})
1122
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 * (see autoremove_wake_function() and the DEFINE_WAIT*() macros below).
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
1133
/*
 * Define an on-stack wait-queue entry owned by the current task, with a
 * caller-supplied wake function.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.entry		= LIST_HEAD_INIT((name).entry),		\
	}
1140
/* On-stack wait entry removed from the queue on wakeup (autoremove). */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
1142
/*
 * Initialize an existing wait_queue_entry for the current task, using
 * autoremove_wake_function so the entry is taken off the queue on wakeup.
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->entry);				\
		(wait)->flags = 0;					\
	} while (0)
1150
1151#endif /* _LINUX_WAIT_H */