/* include/linux/wait.h, at v4.8-rc8 */
#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H
/*
 * Linux wait queue related types and methods
 */
#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02

struct __wait_queue {
	unsigned int		flags;
	void			*private;
	wait_queue_func_t	func;
	struct list_head	task_list;
};

struct wait_bit_key {
	void			*flags;
	int			bit_nr;
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

struct __wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)				\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags	= 0;
	q->private	= p;
	q->func		= default_wake_function;
}

static inline void
init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
{
	q->flags	= 0;
	q->private	= NULL;
	q->func		= func;
}

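/*
 * Illustrative sketch, not part of this header: a custom wakeup
 * callback can be attached to a wait queue entry so that wake_up() on
 * the queue runs arbitrary code instead of waking a task. All names
 * below (my_ctx, my_wake, my_wq) are made up for the example.
 *
 *	struct my_ctx {
 *		wait_queue_t waiter;
 *	};
 *
 *	static int my_wake(wait_queue_t *wait, unsigned mode,
 *			   int flags, void *key)
 *	{
 *		struct my_ctx *ctx = container_of(wait, struct my_ctx, waiter);
 *
 *		// Runs from __wake_up() with the queue lock held,
 *		// possibly in IRQ context: must not sleep.
 *		...handle the event for ctx...
 *		return 0;
 *	}
 *
 *	init_waitqueue_func_entry(&ctx->waiter, my_wake);
 *	add_wait_queue(&my_wq, &ctx->waiter);
 */
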
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head_t::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq: wait queue head
 *
 * Returns true if wq has waiting processes
 *
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(wait_queue_head_t *wq)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

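/*
 * Illustrative sketch, not part of this header: the waker-side half of
 * the pattern above, written with wq_has_sleeper() so the smp_mb() is
 * supplied for you. 'cond' and 'my_wq' are made-up names.
 *
 *	cond = true;
 *	// wq_has_sleeper() issues smp_mb() before the lockless list
 *	// check, pairing with the barrier in set_current_state() on
 *	// the waiter side.
 *	if (wq_has_sleeper(&my_wq))
 *		wake_up(&my_wq);
 */
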
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define ___wait_cond_timeout(condition)					\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_t __wait;						\
	long __ret = ret;	/* explicit shadow */			\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	if (exclusive)							\
		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
	else								\
		__wait.flags = 0;					\
									\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			if (exclusive) {				\
				abort_exclusive_wait(&wq, &__wait,	\
						     state, NULL);	\
				goto __out;				\
			}						\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq, &__wait);					\
__out:	__ret;								\
})

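/*
 * Illustrative sketch, not part of this header: every wait_event_*()
 * macro below is a thin wrapper around ___wait_event() that picks a
 * task state, exclusivity, an initial __ret and a per-iteration 'cmd'.
 * A hypothetical variant that drops a made-up lock 'my_mutex' around
 * schedule() could look like:
 *
 *	#define my_wait_event(wq, condition)				\
 *		(void)___wait_event(wq, condition,			\
 *				    TASK_UNINTERRUPTIBLE, 0, 0,		\
 *				    mutex_unlock(&my_mutex);		\
 *				    schedule();				\
 *				    mutex_lock(&my_mutex))
 */
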
#define __wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

#define __io_wait_event(wq, condition)					\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__io_wait_event(wq, condition);					\
} while (0)

#define __wait_event_freezable(wq, condition)				\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable(wq, condition);		\
	__ret;								\
})

#define __wait_event_timeout(wq, condition, timeout)			\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_UNINTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

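/*
 * Illustrative sketch, not part of this header: typical use of
 * wait_event_timeout() and its return value. 'my_wq' and 'done' are
 * made-up names.
 *
 *	long left = wait_event_timeout(my_wq, done,
 *				       msecs_to_jiffies(100));
 *	if (!left)
 *		return -ETIMEDOUT;	// 'done' still false after 100ms
 *	// 'done' was true; 'left' is 1 or the jiffies left over
 */
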
#define __wait_event_freezable_timeout(wq, condition, timeout)		\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_freezable_timeout(wq, condition, timeout); \
	__ret;								\
})

#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);		\
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command executed before sleeping
 * @cmd2: the command executed after sleeping
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
} while (0)

#define __wait_event_interruptible(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible(wq, condition);	\
	__ret;								\
})

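/*
 * Illustrative sketch, not part of this header: the canonical
 * producer/consumer pairing for wait_event_interruptible(). 'my_wq'
 * and 'data_ready' are made-up names.
 *
 *	// consumer
 *	int err = wait_event_interruptible(my_wq, data_ready);
 *	if (err)
 *		return err;	// -ERESTARTSYS: a signal arrived first
 *	...consume the data...
 *
 *	// producer
 *	data_ready = true;
 *	wake_up_interruptible(&my_wq);
 */
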
#define __wait_event_interruptible_timeout(wq, condition, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

#define __wait_event_hrtimeout(wq, condition, timeout, state)		\
({									\
	int __ret = 0;							\
	struct hrtimer_sleeper __t;					\
									\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,		\
			      HRTIMER_MODE_REL);			\
	hrtimer_init_sleeper(&__t, current);				\
	if ((timeout).tv64 != KTIME_MAX)				\
		hrtimer_start_range_ns(&__t.timer, timeout,		\
				       current->timer_slack_ns,		\
				       HRTIMER_MODE_REL);		\
									\
	__ret = ___wait_event(wq, condition, state, 0, 0,		\
		if (!__t.task) {					\
			__ret = -ETIME;					\
			break;						\
		}							\
		schedule());						\
									\
	hrtimer_cancel(&__t.timer);					\
	destroy_hrtimer_on_stack(&__t.timer);				\
	__ret;								\
})

/**
 * wait_event_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout elapses.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);	\
	__ret;								\
})

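/*
 * Illustrative sketch, not part of this header: wait_event_hrtimeout()
 * takes a ktime_t rather than jiffies, so sub-jiffy timeouts can be
 * expressed, e.g. via ms_to_ktime(). 'my_wq' and 'done' are made-up
 * names.
 *
 *	int err = wait_event_hrtimeout(my_wq, done, ms_to_ktime(5));
 *	if (err == -ETIME)
 *		...5ms elapsed with 'done' still false...
 */
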
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)	\
({									\
	long __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,	\
					       TASK_INTERRUPTIBLE);	\
	__ret;								\
})

#define __wait_event_interruptible_exclusive(wq, condition)		\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule())

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_exclusive(wq, condition);\
	__ret;								\
})

#define __wait_event_killable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_KILLABLE, 1, 0,		\
		      schedule())

#define wait_event_killable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable_exclusive(wq, condition);	\
	__ret;								\
})


#define __wait_event_freezable_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,		\
		      schedule(); try_to_freeze())

#define wait_event_freezable_exclusive(wq, condition)			\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_freezable_exclusive(wq, condition);\
	__ret;								\
})


#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})

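/*
 * Illustrative sketch, not part of this header: the *_locked() waiters
 * below are entered with wq.lock already held, so the condition and
 * any state it depends on can be protected by the waitqueue's own
 * lock. 'my_wq' and 'count' are made-up names.
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_locked(my_wq, count > 0);
 *	if (!err)
 *		count--;	// still under my_wq.lock
 *	spin_unlock(&my_wq.lock);
 *
 * The waker, also under my_wq.lock, updates 'count' and calls
 * wake_up_locked(&my_wq).
 */
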
/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked
 * while sleeping, but @condition is tested with the lock held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked
 * while sleeping, but @condition is tested with the lock held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked
 * while sleeping, but @condition is tested with the lock held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when it is woken up, further processes waiting on the list
 * are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

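/*
 * Illustrative sketch, not part of this header: the _exclusive_locked
 * variants queue the waiter with WQ_FLAG_EXCLUSIVE, so a single
 * wake_up_locked() wakes at most one of them -- the usual shape when
 * each wakeup hands out exactly one item. 'my_wq' and 'items' are
 * made-up names.
 *
 *	spin_lock(&my_wq.lock);
 *	err = wait_event_interruptible_exclusive_locked(my_wq,
 *						!list_empty(&items));
 *	if (!err)
 *		...pop one item, still under my_wq.lock...
 *	spin_unlock(&my_wq.lock);
 */
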
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held. The spinlock is unlocked
 * while sleeping, but @condition is tested with the lock held, and
 * the lock is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when it is woken up, further processes waiting on the list
 * are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))


#define __wait_event_killable(wq, condition)				\
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_event_killable(wq, condition);		\
	__ret;								\
})


#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    spin_unlock_irq(&lock);			\
			    cmd;					\
			    schedule();					\
			    spin_lock_irq(&lock))

/**
 * wait_event_lock_irq_cmd - sleep until a condition becomes true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)		\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

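/*
 * Illustrative sketch, not part of this header: wait_event_lock_irq()
 * (defined just below) with a driver-private lock; the condition is
 * evaluated with the lock held and the lock is dropped only around
 * schedule(). All names are made up.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !queue_full, my_lock);
 *	// my_lock is held again here; queue_full was false under it
 *	...enqueue an item...
 *	spin_unlock_irq(&my_lock);
 */
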
/**
 * wait_event_lock_irq - sleep until a condition becomes true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, );			\
} while (0)


#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)	\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      spin_unlock_irq(&lock);				\
		      cmd;						\
		      schedule();					\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd)\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock, cmd);	\
	__ret;								\
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition becomes true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __wait_event_interruptible_lock_irq(wq,		\
						condition, lock,);	\
	__ret;								\
})

#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
						    lock, timeout)	\
	___wait_event(wq, ___wait_cond_timeout(condition),		\
		      TASK_INTERRUPTIBLE, 0, timeout,			\
		      spin_unlock_irq(&lock);				\
		      __ret = schedule_timeout(__ret);			\
		      spin_lock_irq(&lock))

/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition becomes
 *		true or a timeout elapses. The condition is checked under
 *		the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and otherwise the remaining jiffies
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
						  timeout)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __wait_event_interruptible_lock_irq_timeout(	\
					wq, condition, lock, timeout);	\
	__ret;								\
})

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

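/*
 * Illustrative sketch, not part of this header: the open-coded wait
 * loop that the wait_event*() macros are built from, for cases where
 * the condition check needs extra work between prepare_to_wait() and
 * schedule(). 'my_wq' and 'ready' are made-up names.
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (ready)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */
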
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)


extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait,
				       mode);
}

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit,
				       bit_wait_io,
				       mode);
}

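/*
 * Illustrative sketch, not part of this header: waiting for a busy
 * flag to clear, plus the matching wakeup. MY_BUSY_BIT and 'flags'
 * are made-up names.
 *
 *	// waiter
 *	int err = wait_on_bit(&flags, MY_BUSY_BIT, TASK_INTERRUPTIBLE);
 *	if (err)
 *		return err;	// a signal arrived first
 *
 *	// owner, when done
 *	clear_bit(MY_BUSY_BIT, &flags);
 *	smp_mb__after_atomic();	// order the clear before the wakeup check
 *	wake_up_bit(&flags, MY_BUSY_BIT);
 */
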
/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), except it also takes
 * a timeout parameter.
 *
 * Returned value will be zero if the bit was cleared before the
 * @timeout elapsed, or non-zero if the @timeout elapsed or the process
 * received a signal and the mode permitted wakeup on that signal.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
		    unsigned long timeout)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_timeout(word, bit,
					       bit_wait_timeout,
					       mode, timeout);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
		   unsigned mode)
{
	might_sleep();
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance when trying to lock bitflags.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

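/*
 * Illustrative sketch, not part of this header: using a bit as a
 * sleeping lock. MY_LOCK_BIT and 'flags' are made-up names.
 *
 *	wait_on_bit_lock(&flags, MY_LOCK_BIT, TASK_UNINTERRUPTIBLE);
 *	// the bit is now set and we own it
 *	...critical section...
 *	clear_bit_unlock(MY_LOCK_BIT, &flags);
 *	smp_mb__after_atomic();	// order the release before the wakeup check
 *	wake_up_bit(&flags, MY_LOCK_BIT);
 */
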
/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
			unsigned mode)
{
	might_sleep();
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
	might_sleep();
	if (atomic_read(val) == 0)
		return 0;
	return out_of_line_wait_on_atomic_t(val, action, mode);
}

#endif /* _LINUX_WAIT_H */