#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;

/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private	= tsk, \
	.func		= default_wake_function, \
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock), \
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }

extern void init_waitqueue_head(wait_queue_head_t *q);

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}
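/*
 * Example: a minimal sketch of the two ways to set up a wait queue head.
 * The struct and function names below are hypothetical; only the
 * DECLARE_WAIT_QUEUE_HEAD() and init_waitqueue_head() uses come from this
 * header.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(global_wq);	// static, compile-time init
 *
 *	struct my_device {				// hypothetical structure
 *		wait_queue_head_t wq;
 *		int data_ready;
 *	};
 *
 *	static void my_device_init(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->wq);		// runtime init for embedded heads
 *		dev->data_ready = 0;
 *	}
 */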
/*
 * Used to distinguish between sync and async io wait context:
 *  sync i/o typically specifies a NULL wait queue entry or a wait
 *  queue entry bound to a task (current task) to wake up.
 *  aio specifies a wait queue entry with an async notification
 *  callback routine, not associated with any task.
 */
#define is_sync_wait(wait)	(!(wait) || ((wait)->private))

extern void FASTCALL(add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t * wait));
extern void FASTCALL(remove_wait_queue(wait_queue_head_t *q, wait_queue_t * wait));

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
				       wait_queue_t *old)
{
	list_del(&old->task_list);
}

void FASTCALL(__wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key));
extern void FASTCALL(__wake_up_locked(wait_queue_head_t *q, unsigned int mode));
extern void FASTCALL(__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr));
void FASTCALL(__wake_up_bit(wait_queue_head_t *, void *, int));
int FASTCALL(__wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
int FASTCALL(__wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned));
void FASTCALL(wake_up_bit(void *, int));
int FASTCALL(out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned));
int FASTCALL(out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned));
wait_queue_head_t *FASTCALL(bit_waitqueue(void *, int));

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

#define __wait_event(wq, condition) \
do { \
	DEFINE_WAIT(__wait); \
 \
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		schedule(); \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition) \
do { \
	if (condition) \
		break; \
	__wait_event(wq, condition); \
} while (0)
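/*
 * Example: a minimal sketch of the wait_event()/wake_up() pairing, assuming
 * the hypothetical struct my_device (with a wait_queue_head_t wq and an int
 * data_ready) sketched earlier.  The waiter sleeps in TASK_UNINTERRUPTIBLE
 * until the condition is seen true; the waker must update the condition
 * before calling wake_up().
 *
 *	static void my_device_wait(struct my_device *dev)
 *	{
 *		wait_event(dev->wq, dev->data_ready);	// may sleep
 *	}
 *
 *	static void my_device_post(struct my_device *dev)
 *	{
 *		dev->data_ready = 1;			// change the condition first
 *		wake_up(&dev->wq);			// then wake the waiter
 *	}
 */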
#define __wait_event_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
 \
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
		if (condition) \
			break; \
		ret = schedule_timeout(ret); \
		if (!ret) \
			break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_timeout(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
 \
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
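/*
 * Example: a minimal sketch of handling the wait_event_interruptible()
 * return value, again assuming the hypothetical struct my_device from the
 * sketches above.  Callers on a syscall path normally propagate
 * -ERESTARTSYS so the interrupted call can be restarted or the signal
 * delivered.
 *
 *	static int my_device_wait_interruptible(struct my_device *dev)
 *	{
 *		if (wait_event_interruptible(dev->wq, dev->data_ready))
 *			return -ERESTARTSYS;	// interrupted by a signal
 *		return 0;			// condition evaluated to true
 *	}
 */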
#define __wait_event_interruptible_timeout(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
 \
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			ret = schedule_timeout(ret); \
			if (!ret) \
				break; \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout) \
({ \
	long __ret = timeout; \
	if (!(condition)) \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_interruptible_exclusive(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
 \
	for (;;) { \
		prepare_to_wait_exclusive(&wq, &__wait, \
					  TASK_INTERRUPTIBLE); \
		if (condition) \
			break; \
		if (!signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

#define wait_event_interruptible_exclusive(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_interruptible_exclusive(wq, condition, __ret); \
	__ret; \
})

#define __wait_event_killable(wq, condition, ret) \
do { \
	DEFINE_WAIT(__wait); \
 \
	for (;;) { \
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \
		if (condition) \
			break; \
		if (!fatal_signal_pending(current)) { \
			schedule(); \
			continue; \
		} \
		ret = -ERESTARTSYS; \
		break; \
	} \
	finish_wait(&wq, &__wait); \
} while (0)

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition) \
({ \
	int __ret = 0; \
	if (!(condition)) \
		__wait_event_killable(wq, condition, __ret); \
	__ret; \
})

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t * wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t * wait)
{
	__remove_wait_queue(q, wait);
}
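/*
 * Example: a minimal sketch of interpreting the three possible outcomes of
 * wait_event_interruptible_timeout() defined above, assuming the
 * hypothetical struct my_device from the earlier sketches.
 *
 *	static int my_device_wait_ready(struct my_device *dev)
 *	{
 *		long ret = wait_event_interruptible_timeout(dev->wq,
 *						dev->data_ready, HZ);
 *		if (ret == 0)
 *			return -ETIMEDOUT;	// timeout elapsed
 *		if (ret < 0)
 *			return ret;		// -ERESTARTSYS, signal received
 *		return 0;			// condition true, ret jiffies remained
 *	}
 */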
/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void FASTCALL(prepare_to_wait(wait_queue_head_t *q,
			      wait_queue_t *wait, int state));
void FASTCALL(prepare_to_wait_exclusive(wait_queue_head_t *q,
					wait_queue_t *wait, int state));
void FASTCALL(finish_wait(wait_queue_head_t *q, wait_queue_t *wait));
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT(name) \
	wait_queue_t name = { \
		.private	= current, \
		.func		= autoremove_wake_function, \
		.task_list	= LIST_HEAD_INIT((name).task_list), \
	}

#define DEFINE_WAIT_BIT(name, word, bit) \
	struct wait_bit_queue name = { \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \
		.wait	= { \
			.private	= current, \
			.func		= wake_bit_function, \
			.task_list	= \
				LIST_HEAD_INIT((name).wait.task_list), \
		}, \
	}

#define init_wait(wait) \
	do { \
		(wait)->private = current; \
		(wait)->func = autoremove_wake_function; \
		INIT_LIST_HEAD(&(wait)->task_list); \
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
			      int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				   int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

#endif /* __KERNEL__ */

#endif
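/*
 * Example: a minimal sketch of the wait_on_bit_lock()/wake_up_bit()
 * protocol declared above, using one bit of a flags word as a lock.  The
 * bit number, the flags word and the sleep action below are hypothetical;
 * the barrier keeps the clear_bit() visible before the wakeup.
 *
 *	#define MY_STATE_BUSY	0			// hypothetical bit number
 *
 *	static int my_wait_action(void *word)		// hypothetical sleep action
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	static void my_state_lock(unsigned long *flags)
 *	{
 *		wait_on_bit_lock(flags, MY_STATE_BUSY, my_wait_action,
 *				 TASK_UNINTERRUPTIBLE);
 *	}
 *
 *	static void my_state_unlock(unsigned long *flags)
 *	{
 *		clear_bit(MY_STATE_BUSY, flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(flags, MY_STATE_BUSY);
 *	}
 */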