/* linux/wait.h at v3.8 */

#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>
#include <uapi/linux/wait.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

#define init_waitqueue_head(q) \
	do { \
		static struct lock_class_key __key; \
		\
		__init_waitqueue_head((q), #q, &__key); \
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					     wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail_exclusive(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}
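
/*
 * Illustrative usage sketch (not part of the original header): the two usual
 * ways of setting up a wait queue head. The names my_wq, struct my_device and
 * my_device_alloc() are made up, and kzalloc() would come from <linux/slab.h>.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);		(file scope, static init)
 *
 *	struct my_device {				(head embedded in an object)
 *		wait_queue_head_t wait;
 *		int ready;
 *	};
 *
 *	static struct my_device *my_device_alloc(gfp_t gfp)
 *	{
 *		struct my_device *dev = kzalloc(sizeof(*dev), gfp);
 *
 *		if (dev)
 *			init_waitqueue_head(&dev->wait);
 *		return dev;
 *	}
 */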

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m) \
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m) \
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m) \
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m) \
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define __wait_event(wq, condition) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
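
/*
 * Illustrative usage sketch (not part of the original header) for
 * wait_event()/wake_up(); my_wq and my_data_ready are made-up names. The
 * waker must update the condition before calling wake_up() so the woken task
 * sees the new value when it re-checks the condition.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_data_ready;
 *
 *	consumer:
 *		wait_event(my_wq, my_data_ready);	(TASK_UNINTERRUPTIBLE sleep)
 *
 *	producer:
 *		my_data_ready = 1;
 *		wake_up(&my_wq);
 */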

#define __wait_event_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		ret = schedule_timeout(ret); \
		if (!ret) \
			break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})
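
/*
 * Illustrative usage sketch (not part of the original header) for
 * wait_event_interruptible_timeout(), showing how the three possible return
 * values are typically handled; my_wq and my_done are made-up names, and
 * msecs_to_jiffies() comes from <linux/jiffies.h>.
 *
 *	long ret = wait_event_interruptible_timeout(my_wq, my_done,
 *						    msecs_to_jiffies(100));
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(timeout elapsed, condition still false)
 *	if (ret < 0)
 *		return ret;		(-ERESTARTSYS, interrupted by a signal)
 *	(ret > 0: condition became true, ret is the remaining jiffies)
 */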

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					  TASK_INTERRUPTIBLE); \
		if (condition) { \
			finish_wait(&wq, &__wait); \
			break; \
		} \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		abort_exclusive_wait(&wq, &__wait, \
				     TASK_INTERRUPTIBLE, NULL); \
		break; \
	} \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({ \
	int __ret = 0; \
	DEFINE_WAIT(__wait); \
	if (exclusive) \
		__wait.flags |= WQ_FLAG_EXCLUSIVE; \
	do { \
		if (likely(list_empty(&__wait.task_list))) \
			__add_wait_queue_tail(&(wq), &__wait); \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (signal_pending(current)) { \
			__ret = -ERESTARTSYS; \
			break; \
		} \
		if (irq) \
			spin_unlock_irq(&(wq).lock); \
		else \
			spin_unlock(&(wq).lock); \
		schedule(); \
		if (irq) \
			spin_lock_irq(&(wq).lock); \
		else \
			spin_lock(&(wq).lock); \
	} while (!(condition)); \
	__remove_wait_queue(&(wq), &__wait); \
	__set_current_state(TASK_RUNNING); \
	__ret; \
})

/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
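
/*
 * Illustrative usage sketch (not part of the original header) for
 * wait_event_interruptible_locked(): the macro is entered and left with
 * wq.lock held, and the matching waker uses wake_up_locked() under the same
 * lock; my_wq and my_count are made-up names.
 *
 *	waiter:
 *		int err;
 *
 *		spin_lock(&my_wq.lock);
 *		err = wait_event_interruptible_locked(my_wq, my_count > 0);
 *		if (!err)
 *			my_count--;	(checked and consumed under wq.lock)
 *		spin_unlock(&my_wq.lock);
 *
 *	waker:
 *		spin_lock(&my_wq.lock);
 *		my_count++;
 *		wake_up_locked(&my_wq);
 *		spin_unlock(&my_wq.lock);
 */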

/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so while other processes may be waiting on the list, no further
 * processes are considered once this process has been woken up.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held. This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so while other processes may be waiting on the list, no further
 * processes are considered once this process has been woken up.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
	((condition) \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
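
/*
 * Illustrative usage sketch (not part of the original header): because the
 * _exclusive variants above queue the waiter with WQ_FLAG_EXCLUSIVE, a plain
 * wake_up() wakes at most one of them, which avoids a thundering herd when
 * many workers wait for single items of work; my_wq and my_work_pending are
 * made-up names.
 *
 *	worker threads:
 *		err = wait_event_interruptible_exclusive(my_wq, my_work_pending);
 *
 *	producer (wakes a single exclusive waiter):
 *		my_work_pending = 1;
 *		wake_up(&my_wq);
 */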

#define __wait_event_killable(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
		if (condition) \
			break; \
		if (!fatal_signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_killable(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_lock_irq(wq, condition, lock, cmd) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *			     condition is checked under the lock. This
 *			     is expected to be called with the lock
 *			     taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *	  and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, cmd); \
} while (0)

/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *			 condition is checked under the lock. This
 *			 is expected to be called with the lock
 *			 taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq, condition, lock) \
do { \
	if (condition) \
		break; \
	__wait_event_lock_irq(wq, condition, lock, ); \
} while (0)
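
/*
 * Illustrative usage sketch (not part of the original header) for
 * wait_event_lock_irq(): the condition is protected by a driver spinlock
 * taken with spin_lock_irq(), and the macro drops and re-takes that lock
 * around schedule() so the condition is always evaluated under it; my_lock,
 * my_wq and my_list are made-up names. Note that the lock is passed by name,
 * not by address.
 *
 *	spin_lock_irq(&my_lock);
 *	wait_event_lock_irq(my_wq, !list_empty(&my_list), my_lock);
 *	(still under my_lock here, and my_list is known to be non-empty)
 *	spin_unlock_irq(&my_lock);
 */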

#define __wait_event_interruptible_lock_irq(wq, condition, \
					    lock, ret, cmd) \
do { \
	DEFINE_WAIT(__wait); \
	\
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (signal_pending(current)) { \
			ret = -ERESTARTSYS; \
			break; \
		} \
		spin_unlock_irq(&lock); \
		cmd; \
		schedule(); \
		spin_lock_irq(&lock); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected to
 *		be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *	  schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *	 sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({ \
	int __ret = 0; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq(wq, condition, \
						    lock, __ret, cmd); \
	__ret; \
})

/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *		The condition is checked under the lock. This is expected
 *		to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *	  and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock) \
({ \
	int __ret = 0; \
	\
	if (!(condition)) \
		__wait_event_interruptible_lock_irq(wq, condition, \
						    lock, __ret, ); \
	__ret; \
})

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy. DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			  unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait	= { \
			.private	= current, \
			.func		= wake_bit_function, \
			.task_list	= \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
		(wait)->flags = 0; \
	} while (0)
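
/*
 * Illustrative usage sketch (not part of the original header): the open-coded
 * waiting loop that the wait_event*() macros are built from, for cases that
 * need extra work between the condition check and schedule(); my_wq and
 * my_cond are made-up names.
 *
 *	int err = 0;
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
 *		if (my_cond)
 *			break;
 *		if (signal_pending(current)) {
 *			err = -ERESTARTSYS;
 *			break;
 *		}
 *		schedule();
 *	}
 *	finish_wait(&my_wq, &wait);
 */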

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}
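
/*
 * Illustrative usage sketch (not part of the original header) for the hashed
 * bit-waitqueue: taking and releasing a "busy" bit in a flags word.
 * MY_BUSY_BIT, my_flags and my_bit_wait() are made-up names; the action
 * callback conventionally just sleeps and returns 0.
 *
 *	static unsigned long my_flags;
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	acquire (sleeps if the bit is already set):
 *		wait_on_bit_lock(&my_flags, MY_BUSY_BIT, my_bit_wait,
 *				 TASK_UNINTERRUPTIBLE);
 *
 *	release and wake any waiters:
 *		clear_bit(MY_BUSY_BIT, &my_flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&my_flags, MY_BUSY_BIT);
 */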

#endif