Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/wait: Rename wait_queue_t => wait_queue_entry_t

Rename:

wait_queue_t => wait_queue_entry_t

'wait_queue_t' was always a slight misnomer: its name implies that it's a "queue",
but in reality it's a queue *entry*. The 'real' queue is the wait queue head,
which had to carry the name.

Start sorting this out by renaming it to 'wait_queue_entry_t'.

This also allows the real structure name 'struct __wait_queue' to
lose its double underscore and become 'struct wait_queue_entry',
which is the more canonical nomenclature for such data types.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

+216 -213
+1 -1
Documentation/DocBook/kernel-hacking.tmpl
··· 819 819 certain condition is true. They must be used carefully to ensure 820 820 there is no race condition. You declare a 821 821 <type>wait_queue_head_t</type>, and then processes which want to 822 - wait for that condition declare a <type>wait_queue_t</type> 822 + wait for that condition declare a <type>wait_queue_entry_t</type> 823 823 referring to themselves, and place that in the queue. 824 824 </para> 825 825
+6 -6
Documentation/filesystems/autofs4.txt
··· 316 316 struct autofs_v5_packet { 317 317 int proto_version; /* Protocol version */ 318 318 int type; /* Type of packet */ 319 - autofs_wqt_t wait_queue_token; 319 + autofs_wqt_t wait_queue_entry_token; 320 320 __u32 dev; 321 321 __u64 ino; 322 322 __u32 uid; ··· 341 341 `O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at 342 342 most one packet, and any unread portion of a packet will be discarded. 343 343 344 - The `wait_queue_token` is a unique number which can identify a 344 + The `wait_queue_entry_token` is a unique number which can identify a 345 345 particular request to be acknowledged. When a message is sent over 346 346 the pipe the affected dentry is marked as either "active" or 347 347 "expiring" and other accesses to it block until the message is 348 348 acknowledged using one of the ioctls below and the relevant 349 - `wait_queue_token`. 349 + `wait_queue_entry_token`. 350 350 351 351 Communicating with autofs: root directory ioctls 352 352 ------------------------------------------------ ··· 358 358 The available ioctl commands are: 359 359 360 360 - **AUTOFS_IOC_READY**: a notification has been handled. The argument 361 - to the ioctl command is the "wait_queue_token" number 361 + to the ioctl command is the "wait_queue_entry_token" number 362 362 corresponding to the notification being acknowledged. 363 363 - **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with 364 364 the error code `ENOENT`. ··· 382 382 struct autofs_packet_expire_multi { 383 383 int proto_version; /* Protocol version */ 384 384 int type; /* Type of packet */ 385 - autofs_wqt_t wait_queue_token; 385 + autofs_wqt_t wait_queue_entry_token; 386 386 int len; 387 387 char name[NAME_MAX+1]; 388 388 }; 389 389 390 390 is required. This is filled in with the name of something 391 391 that can be unmounted or removed. If nothing can be expired, 392 - `errno` is set to `EAGAIN`. Even though a `wait_queue_token` 392 + `errno` is set to `EAGAIN`. Even though a `wait_queue_entry_token` 393 393 is present in the structure, no "wait queue" is established 394 394 and no acknowledgment is needed. 395 395 - **AUTOFS_IOC_EXPIRE_MULTI**: This is similar to
+1 -1
block/blk-mq.c
··· 926 926 return first != NULL; 927 927 } 928 928 929 - static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags, 929 + static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags, 930 930 void *key) 931 931 { 932 932 struct blk_mq_hw_ctx *hctx;
+1 -1
block/blk-wbt.c
··· 503 503 } 504 504 505 505 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw, 506 - wait_queue_t *wait, unsigned long rw) 506 + wait_queue_entry_t *wait, unsigned long rw) 507 507 { 508 508 /* 509 509 * inc it here even if disabled, since we'll dec it at completion.
+4 -4
block/kyber-iosched.c
··· 99 99 struct list_head rqs[KYBER_NUM_DOMAINS]; 100 100 unsigned int cur_domain; 101 101 unsigned int batching; 102 - wait_queue_t domain_wait[KYBER_NUM_DOMAINS]; 102 + wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS]; 103 103 atomic_t wait_index[KYBER_NUM_DOMAINS]; 104 104 }; 105 105 ··· 507 507 } 508 508 } 509 509 510 - static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags, 510 + static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags, 511 511 void *key) 512 512 { 513 513 struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private); ··· 523 523 { 524 524 unsigned int sched_domain = khd->cur_domain; 525 525 struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain]; 526 - wait_queue_t *wait = &khd->domain_wait[sched_domain]; 526 + wait_queue_entry_t *wait = &khd->domain_wait[sched_domain]; 527 527 struct sbq_wait_state *ws; 528 528 int nr; 529 529 ··· 734 734 { \ 735 735 struct blk_mq_hw_ctx *hctx = data; \ 736 736 struct kyber_hctx_data *khd = hctx->sched_data; \ 737 - wait_queue_t *wait = &khd->domain_wait[domain]; \ 737 + wait_queue_entry_t *wait = &khd->domain_wait[domain]; \ 738 738 \ 739 739 seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \ 740 740 return 0; \
+1 -1
drivers/bluetooth/btmrvl_main.c
··· 602 602 struct btmrvl_thread *thread = data; 603 603 struct btmrvl_private *priv = thread->priv; 604 604 struct btmrvl_adapter *adapter = priv->adapter; 605 - wait_queue_t wait; 605 + wait_queue_entry_t wait; 606 606 struct sk_buff *skb; 607 607 ulong flags; 608 608
+1 -1
drivers/char/ipmi/ipmi_watchdog.c
··· 821 821 loff_t *ppos) 822 822 { 823 823 int rv = 0; 824 - wait_queue_t wait; 824 + wait_queue_entry_t wait; 825 825 826 826 if (count <= 0) 827 827 return 0;
+1 -1
drivers/gpu/drm/i915/i915_gem_request.h
··· 123 123 * It is used by the driver to then queue the request for execution. 124 124 */ 125 125 struct i915_sw_fence submit; 126 - wait_queue_t submitq; 126 + wait_queue_entry_t submitq; 127 127 wait_queue_head_t execute; 128 128 129 129 /* A list of everyone we wait upon, and everyone who waits upon us.
+7 -7
drivers/gpu/drm/i915/i915_sw_fence.c
··· 152 152 struct list_head *continuation) 153 153 { 154 154 wait_queue_head_t *x = &fence->wait; 155 - wait_queue_t *pos, *next; 155 + wait_queue_entry_t *pos, *next; 156 156 unsigned long flags; 157 157 158 158 debug_fence_deactivate(fence); ··· 254 254 __i915_sw_fence_commit(fence); 255 255 } 256 256 257 - static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key) 257 + static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key) 258 258 { 259 259 list_del(&wq->task_list); 260 260 __i915_sw_fence_complete(wq->private, key); ··· 267 267 static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, 268 268 const struct i915_sw_fence * const signaler) 269 269 { 270 - wait_queue_t *wq; 270 + wait_queue_entry_t *wq; 271 271 272 272 if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) 273 273 return false; ··· 288 288 289 289 static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence) 290 290 { 291 - wait_queue_t *wq; 291 + wait_queue_entry_t *wq; 292 292 293 293 if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags)) 294 294 return; ··· 320 320 321 321 static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, 322 322 struct i915_sw_fence *signaler, 323 - wait_queue_t *wq, gfp_t gfp) 323 + wait_queue_entry_t *wq, gfp_t gfp) 324 324 { 325 325 unsigned long flags; 326 326 int pending; ··· 359 359 360 360 spin_lock_irqsave(&signaler->wait.lock, flags); 361 361 if (likely(!i915_sw_fence_done(signaler))) { 362 - __add_wait_queue_tail(&signaler->wait, wq); 362 + __add_wait_queue_entry_tail(&signaler->wait, wq); 363 363 pending = 1; 364 364 } else { 365 365 i915_sw_fence_wake(wq, 0, 0, NULL); ··· 372 372 373 373 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, 374 374 struct i915_sw_fence *signaler, 375 - wait_queue_t *wq) 375 + wait_queue_entry_t *wq) 376 376 { 377 377 return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0); 378 378 }
+1 -1
drivers/gpu/drm/i915/i915_sw_fence.h
··· 66 66 67 67 int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, 68 68 struct i915_sw_fence *after, 69 - wait_queue_t *wq); 69 + wait_queue_entry_t *wq); 70 70 int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence, 71 71 struct i915_sw_fence *after, 72 72 gfp_t gfp);
+1 -1
drivers/gpu/drm/radeon/radeon.h
··· 375 375 unsigned ring; 376 376 bool is_vm_update; 377 377 378 - wait_queue_t fence_wake; 378 + wait_queue_entry_t fence_wake; 379 379 }; 380 380 381 381 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
+1 -1
drivers/gpu/drm/radeon/radeon_fence.c
··· 158 158 * for the fence locking itself, so unlocked variants are used for 159 159 * fence_signal, and remove_wait_queue. 160 160 */ 161 - static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key) 161 + static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key) 162 162 { 163 163 struct radeon_fence *fence; 164 164 u64 seq;
+1 -1
drivers/gpu/vga/vgaarb.c
··· 417 417 { 418 418 struct vga_device *vgadev, *conflict; 419 419 unsigned long flags; 420 - wait_queue_t wait; 420 + wait_queue_entry_t wait; 421 421 int rc = 0; 422 422 423 423 vga_check_first_use();
+1 -1
drivers/infiniband/hw/i40iw/i40iw_main.c
··· 1939 1939 bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev) 1940 1940 { 1941 1941 struct i40iw_device *iwdev; 1942 - wait_queue_t wait; 1942 + wait_queue_entry_t wait; 1943 1943 1944 1944 iwdev = dev->back_dev; 1945 1945
+1 -1
drivers/md/bcache/btree.h
··· 207 207 208 208 struct btree_op { 209 209 /* for waiting on btree reserve in btree_split() */ 210 - wait_queue_t wait; 210 + wait_queue_entry_t wait; 211 211 212 212 /* Btree level at which we start taking write locks */ 213 213 short lock;
+2 -2
drivers/net/ethernet/cavium/liquidio/octeon_main.h
··· 144 144 sleep_cond(wait_queue_head_t *wait_queue, int *condition) 145 145 { 146 146 int errno = 0; 147 - wait_queue_t we; 147 + wait_queue_entry_t we; 148 148 149 149 init_waitqueue_entry(&we, current); 150 150 add_wait_queue(wait_queue, &we); ··· 171 171 int *condition, 172 172 int timeout) 173 173 { 174 - wait_queue_t we; 174 + wait_queue_entry_t we; 175 175 176 176 init_waitqueue_entry(&we, current); 177 177 add_wait_queue(wait_queue, &we);
+1 -1
drivers/net/wireless/cisco/airo.c
··· 3066 3066 if (ai->jobs) { 3067 3067 locked = down_interruptible(&ai->sem); 3068 3068 } else { 3069 - wait_queue_t wait; 3069 + wait_queue_entry_t wait; 3070 3070 3071 3071 init_waitqueue_entry(&wait, current); 3072 3072 add_wait_queue(&ai->thr_wait, &wait);
+1 -1
drivers/net/wireless/intersil/hostap/hostap_ioctl.c
··· 2544 2544 ret = -EINVAL; 2545 2545 } 2546 2546 if (local->iw_mode == IW_MODE_MASTER) { 2547 - wait_queue_t __wait; 2547 + wait_queue_entry_t __wait; 2548 2548 init_waitqueue_entry(&__wait, current); 2549 2549 add_wait_queue(&local->hostscan_wq, &__wait); 2550 2550 set_current_state(TASK_INTERRUPTIBLE);
+1 -1
drivers/net/wireless/marvell/libertas/main.c
··· 453 453 { 454 454 struct net_device *dev = data; 455 455 struct lbs_private *priv = dev->ml_priv; 456 - wait_queue_t wait; 456 + wait_queue_entry_t wait; 457 457 458 458 lbs_deb_enter(LBS_DEB_THREAD); 459 459
+1 -1
drivers/scsi/dpt/dpti_i2o.h
··· 48 48 #include <linux/wait.h> 49 49 typedef wait_queue_head_t adpt_wait_queue_head_t; 50 50 #define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait) 51 - typedef wait_queue_t adpt_wait_queue_t; 51 + typedef wait_queue_entry_t adpt_wait_queue_entry_t; 52 52 53 53 /* 54 54 * message structures
+6 -6
drivers/scsi/ips.c
··· 301 301 static uint32_t ips_statupd_morpheus(ips_ha_t *); 302 302 static ips_scb_t *ips_getscb(ips_ha_t *); 303 303 static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *); 304 - static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *); 304 + static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *); 305 305 static void ips_putq_copp_tail(ips_copp_queue_t *, 306 306 ips_copp_wait_item_t *); 307 307 static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *); 308 308 static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *); 309 - static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *); 310 - static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *, 309 + static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *); 310 + static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *, 311 311 struct scsi_cmnd *); 312 312 static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *, 313 313 ips_copp_wait_item_t *); ··· 2871 2871 /* ASSUMED to be called from within the HA lock */ 2872 2872 /* */ 2873 2873 /****************************************************************************/ 2874 - static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item) 2874 + static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item) 2875 2875 { 2876 2876 METHOD_TRACE("ips_putq_wait_tail", 1); 2877 2877 ··· 2902 2902 /* ASSUMED to be called from within the HA lock */ 2903 2903 /* */ 2904 2904 /****************************************************************************/ 2905 - static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue) 2905 + static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue) 2906 2906 { 2907 2907 struct scsi_cmnd *item; 2908 2908 ··· 2936 2936 /* ASSUMED to be called from within the HA lock */ 2937 2937 /* */ 2938 2938 /****************************************************************************/ 2939 - static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue, 2939 + static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue, 2940 2940 struct scsi_cmnd *item) 2941 2941 { 2942 2942 struct scsi_cmnd *p;
+2 -2
drivers/scsi/ips.h
··· 989 989 struct scsi_cmnd *head; 990 990 struct scsi_cmnd *tail; 991 991 int count; 992 - } ips_wait_queue_t; 992 + } ips_wait_queue_entry_t; 993 993 994 994 typedef struct ips_copp_wait_item { 995 995 struct scsi_cmnd *scsi_cmd; ··· 1035 1035 ips_stat_t sp; /* Status packer pointer */ 1036 1036 struct ips_scb *scbs; /* Array of all CCBS */ 1037 1037 struct ips_scb *scb_freelist; /* SCB free list */ 1038 - ips_wait_queue_t scb_waitlist; /* Pending SCB list */ 1038 + ips_wait_queue_entry_t scb_waitlist; /* Pending SCB list */ 1039 1039 ips_copp_queue_t copp_waitlist; /* Pending PT list */ 1040 1040 ips_scb_queue_t scb_activelist; /* Active SCB list */ 1041 1041 IPS_IO_CMD *dummy; /* dummy command */
+3 -3
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
··· 3267 3267 kiblnd_connd(void *arg) 3268 3268 { 3269 3269 spinlock_t *lock = &kiblnd_data.kib_connd_lock; 3270 - wait_queue_t wait; 3270 + wait_queue_entry_t wait; 3271 3271 unsigned long flags; 3272 3272 struct kib_conn *conn; 3273 3273 int timeout; ··· 3521 3521 long id = (long)arg; 3522 3522 struct kib_sched_info *sched; 3523 3523 struct kib_conn *conn; 3524 - wait_queue_t wait; 3524 + wait_queue_entry_t wait; 3525 3525 unsigned long flags; 3526 3526 struct ib_wc wc; 3527 3527 int did_something; ··· 3656 3656 { 3657 3657 rwlock_t *glock = &kiblnd_data.kib_global_lock; 3658 3658 struct kib_dev *dev; 3659 - wait_queue_t wait; 3659 + wait_queue_entry_t wait; 3660 3660 unsigned long flags; 3661 3661 int rc; 3662 3662
+2 -2
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
··· 2166 2166 { 2167 2167 spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; 2168 2168 struct ksock_connreq *cr; 2169 - wait_queue_t wait; 2169 + wait_queue_entry_t wait; 2170 2170 int nloops = 0; 2171 2171 int cons_retry = 0; 2172 2172 ··· 2554 2554 int 2555 2555 ksocknal_reaper(void *arg) 2556 2556 { 2557 - wait_queue_t wait; 2557 + wait_queue_entry_t wait; 2558 2558 struct ksock_conn *conn; 2559 2559 struct ksock_sched *sched; 2560 2560 struct list_head enomem_conns;
+1 -1
drivers/staging/lustre/lnet/libcfs/debug.c
··· 361 361 362 362 void libcfs_debug_dumplog(void) 363 363 { 364 - wait_queue_t wait; 364 + wait_queue_entry_t wait; 365 365 struct task_struct *dumper; 366 366 367 367 /* we're being careful to ensure that the kernel thread is
+1 -1
drivers/staging/lustre/lnet/libcfs/tracefile.c
··· 990 990 complete(&tctl->tctl_start); 991 991 992 992 while (1) { 993 - wait_queue_t __wait; 993 + wait_queue_entry_t __wait; 994 994 995 995 pc.pc_want_daemon_pages = 0; 996 996 collect_pages(&pc);
+1 -1
drivers/staging/lustre/lnet/lnet/lib-eq.c
··· 312 312 { 313 313 int tms = *timeout_ms; 314 314 int wait; 315 - wait_queue_t wl; 315 + wait_queue_entry_t wl; 316 316 unsigned long now; 317 317 318 318 if (!tms)
+1 -1
drivers/staging/lustre/lnet/lnet/lib-socket.c
··· 516 516 int 517 517 lnet_sock_accept(struct socket **newsockp, struct socket *sock) 518 518 { 519 - wait_queue_t wait; 519 + wait_queue_entry_t wait; 520 520 struct socket *newsock; 521 521 int rc; 522 522
+3 -3
drivers/staging/lustre/lustre/fid/fid_request.c
··· 192 192 } 193 193 194 194 static int seq_fid_alloc_prep(struct lu_client_seq *seq, 195 - wait_queue_t *link) 195 + wait_queue_entry_t *link) 196 196 { 197 197 if (seq->lcs_update) { 198 198 add_wait_queue(&seq->lcs_waitq, link); ··· 223 223 int seq_client_alloc_fid(const struct lu_env *env, 224 224 struct lu_client_seq *seq, struct lu_fid *fid) 225 225 { 226 - wait_queue_t link; 226 + wait_queue_entry_t link; 227 227 int rc; 228 228 229 229 LASSERT(seq); ··· 290 290 */ 291 291 void seq_client_flush(struct lu_client_seq *seq) 292 292 { 293 - wait_queue_t link; 293 + wait_queue_entry_t link; 294 294 295 295 LASSERT(seq); 296 296 init_waitqueue_entry(&link, current);
+2 -2
drivers/staging/lustre/lustre/include/lustre_lib.h
··· 201 201 sigmask(SIGALRM)) 202 202 203 203 /** 204 - * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively 204 + * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively 205 205 * waiting threads, which is not always desirable because all threads will 206 206 * be waken up again and again, even user only needs a few of them to be 207 207 * active most time. This is not good for performance because cache can ··· 228 228 */ 229 229 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \ 230 230 do { \ 231 - wait_queue_t __wait; \ 231 + wait_queue_entry_t __wait; \ 232 232 long __timeout = info->lwi_timeout; \ 233 233 sigset_t __blocked; \ 234 234 int __allow_intr = info->lwi_allow_intr; \
+1 -1
drivers/staging/lustre/lustre/llite/lcommon_cl.c
··· 207 207 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj) 208 208 { 209 209 struct lu_object_header *header = obj->co_lu.lo_header; 210 - wait_queue_t waiter; 210 + wait_queue_entry_t waiter; 211 211 212 212 if (unlikely(atomic_read(&header->loh_ref) != 1)) { 213 213 struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+1 -1
drivers/staging/lustre/lustre/lov/lov_cl_internal.h
··· 370 370 struct ost_lvb lti_lvb; 371 371 struct cl_2queue lti_cl2q; 372 372 struct cl_page_list lti_plist; 373 - wait_queue_t lti_waiter; 373 + wait_queue_entry_t lti_waiter; 374 374 struct cl_attr lti_attr; 375 375 }; 376 376
+1 -1
drivers/staging/lustre/lustre/lov/lov_object.c
··· 371 371 struct lov_layout_raid0 *r0; 372 372 struct lu_site *site; 373 373 struct lu_site_bkt_data *bkt; 374 - wait_queue_t *waiter; 374 + wait_queue_entry_t *waiter; 375 375 376 376 r0 = &lov->u.raid0; 377 377 LASSERT(r0->lo_sub[idx] == los);
+3 -3
drivers/staging/lustre/lustre/obdclass/lu_object.c
··· 556 556 static struct lu_object *htable_lookup(struct lu_site *s, 557 557 struct cfs_hash_bd *bd, 558 558 const struct lu_fid *f, 559 - wait_queue_t *waiter, 559 + wait_queue_entry_t *waiter, 560 560 __u64 *version) 561 561 { 562 562 struct lu_site_bkt_data *bkt; ··· 670 670 struct lu_device *dev, 671 671 const struct lu_fid *f, 672 672 const struct lu_object_conf *conf, 673 - wait_queue_t *waiter) 673 + wait_queue_entry_t *waiter) 674 674 { 675 675 struct lu_object *o; 676 676 struct lu_object *shadow; ··· 750 750 { 751 751 struct lu_site_bkt_data *bkt; 752 752 struct lu_object *obj; 753 - wait_queue_t wait; 753 + wait_queue_entry_t wait; 754 754 755 755 while (1) { 756 756 obj = lu_object_find_try(env, dev, f, conf, &wait);
+1 -1
drivers/vfio/virqfd.c
··· 43 43 queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown); 44 44 } 45 45 46 - static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) 46 + static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 47 47 { 48 48 struct virqfd *virqfd = container_of(wait, struct virqfd, wait); 49 49 unsigned long flags = (unsigned long)key;
+1 -1
drivers/vhost/vhost.c
··· 165 165 add_wait_queue(wqh, &poll->wait); 166 166 } 167 167 168 - static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync, 168 + static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, 169 169 void *key) 170 170 { 171 171 struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
+1 -1
drivers/vhost/vhost.h
··· 31 31 struct vhost_poll { 32 32 poll_table table; 33 33 wait_queue_head_t *wqh; 34 - wait_queue_t wait; 34 + wait_queue_entry_t wait; 35 35 struct vhost_work work; 36 36 unsigned long mask; 37 37 struct vhost_dev *dev;
+1 -1
fs/autofs4/autofs_i.h
··· 83 83 struct autofs_wait_queue { 84 84 wait_queue_head_t queue; 85 85 struct autofs_wait_queue *next; 86 - autofs_wqt_t wait_queue_token; 86 + autofs_wqt_t wait_queue_entry_token; 87 87 /* We use the following to see what we are waiting for */ 88 88 struct qstr name; 89 89 u32 dev;
+9 -9
fs/autofs4/waitq.c
··· 104 104 size_t pktsz; 105 105 106 106 pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n", 107 - (unsigned long) wq->wait_queue_token, 107 + (unsigned long) wq->wait_queue_entry_token, 108 108 wq->name.len, wq->name.name, type); 109 109 110 110 memset(&pkt, 0, sizeof(pkt)); /* For security reasons */ ··· 120 120 121 121 pktsz = sizeof(*mp); 122 122 123 - mp->wait_queue_token = wq->wait_queue_token; 123 + mp->wait_queue_entry_token = wq->wait_queue_entry_token; 124 124 mp->len = wq->name.len; 125 125 memcpy(mp->name, wq->name.name, wq->name.len); 126 126 mp->name[wq->name.len] = '\0'; ··· 133 133 134 134 pktsz = sizeof(*ep); 135 135 136 - ep->wait_queue_token = wq->wait_queue_token; 136 + ep->wait_queue_entry_token = wq->wait_queue_entry_token; 137 137 ep->len = wq->name.len; 138 138 memcpy(ep->name, wq->name.name, wq->name.len); 139 139 ep->name[wq->name.len] = '\0'; ··· 153 153 154 154 pktsz = sizeof(*packet); 155 155 156 - packet->wait_queue_token = wq->wait_queue_token; 156 + packet->wait_queue_entry_token = wq->wait_queue_entry_token; 157 157 packet->len = wq->name.len; 158 158 memcpy(packet->name, wq->name.name, wq->name.len); 159 159 packet->name[wq->name.len] = '\0'; ··· 428 428 return -ENOMEM; 429 429 } 430 430 431 - wq->wait_queue_token = autofs4_next_wait_queue; 431 + wq->wait_queue_entry_token = autofs4_next_wait_queue; 432 432 if (++autofs4_next_wait_queue == 0) 433 433 autofs4_next_wait_queue = 1; 434 434 wq->next = sbi->queues; ··· 461 461 } 462 462 463 463 pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n", 464 - (unsigned long) wq->wait_queue_token, wq->name.len, 464 + (unsigned long) wq->wait_queue_entry_token, wq->name.len, 465 465 wq->name.name, notify); 466 466 467 467 /* ··· 471 471 } else { 472 472 wq->wait_ctr++; 473 473 pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n", 474 - (unsigned long) wq->wait_queue_token, wq->name.len, 474 + (unsigned long) wq->wait_queue_entry_token, wq->name.len, 475 475 wq->name.name, notify); 476 476 mutex_unlock(&sbi->wq_mutex); 477 477 kfree(qstr.name); ··· 550 550 } 551 551 552 552 553 - int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status) 553 + int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_entry_token, int status) 554 554 { 555 555 struct autofs_wait_queue *wq, **wql; 556 556 557 557 mutex_lock(&sbi->wq_mutex); 558 558 for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { 559 - if (wq->wait_queue_token == wait_queue_token) 559 + if (wq->wait_queue_entry_token == wait_queue_entry_token) 560 560 break; 561 561 } 562 562
+1 -1
fs/cachefiles/internal.h
··· 97 97 * backing file read tracking 98 98 */ 99 99 struct cachefiles_one_read { 100 - wait_queue_t monitor; /* link into monitored waitqueue */ 100 + wait_queue_entry_t monitor; /* link into monitored waitqueue */ 101 101 struct page *back_page; /* backing file page we're waiting for */ 102 102 struct page *netfs_page; /* netfs page we're going to fill */ 103 103 struct fscache_retrieval *op; /* retrieval op covering this */
+1 -1
fs/cachefiles/namei.c
··· 204 204 wait_queue_head_t *wq; 205 205 206 206 signed long timeout = 60 * HZ; 207 - wait_queue_t wait; 207 + wait_queue_entry_t wait; 208 208 bool requeue; 209 209 210 210 /* if the object we're waiting for is queued for processing,
+1 -1
fs/cachefiles/rdwr.c
··· 21 21 * - we use this to detect read completion of backing pages 22 22 * - the caller holds the waitqueue lock 23 23 */ 24 - static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode, 24 + static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode, 25 25 int sync, void *_key) 26 26 { 27 27 struct cachefiles_one_read *monitor =
+2 -2
fs/dax.c
··· 84 84 }; 85 85 86 86 struct wait_exceptional_entry_queue { 87 - wait_queue_t wait; 87 + wait_queue_entry_t wait; 88 88 struct exceptional_entry_key key; 89 89 }; 90 90 ··· 108 108 return wait_table + hash; 109 109 } 110 110 111 - static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode, 111 + static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode, 112 112 int sync, void *keyp) 113 113 { 114 114 struct exceptional_entry_key *key = keyp;
+1 -1
fs/eventfd.c
··· 191 191 * This is used to atomically remove a wait queue entry from the eventfd wait 192 192 * queue head, and read/reset the counter value. 193 193 */ 194 - int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, 194 + int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, 195 195 __u64 *cnt) 196 196 { 197 197 unsigned long flags;
+5 -5
fs/eventpoll.c
··· 244 244 * Wait queue item that will be linked to the target file wait 245 245 * queue head. 246 246 */ 247 - wait_queue_t wait; 247 + wait_queue_entry_t wait; 248 248 249 249 /* The wait queue head that linked the "wait" wait queue item */ 250 250 wait_queue_head_t *whead; ··· 347 347 return !list_empty(p); 348 348 } 349 349 350 - static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) 350 + static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p) 351 351 { 352 352 return container_of(p, struct eppoll_entry, wait); 353 353 } 354 354 355 355 /* Get the "struct epitem" from a wait queue pointer */ 356 - static inline struct epitem *ep_item_from_wait(wait_queue_t *p) 356 + static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p) 357 357 { 358 358 return container_of(p, struct eppoll_entry, wait)->base; 359 359 } ··· 1078 1078 * mechanism. It is called by the stored file descriptors when they 1079 1079 * have events to report. 1080 1080 */ 1081 - static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key) 1081 + static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 1082 1082 { 1083 1083 int pwake = 0; 1084 1084 unsigned long flags; ··· 1699 1699 int res = 0, eavail, timed_out = 0; 1700 1700 unsigned long flags; 1701 1701 u64 slack = 0; 1702 - wait_queue_t wait; 1702 + wait_queue_entry_t wait; 1703 1703 ktime_t expires, *to = NULL; 1704 1704 1705 1705 if (timeout > 0) {
+1 -1
fs/fs_pin.c
··· 34 34 35 35 void pin_kill(struct fs_pin *p) 36 36 { 37 - wait_queue_t wait; 37 + wait_queue_entry_t wait; 38 38 39 39 if (!p) { 40 40 rcu_read_unlock();
+2 -2
fs/nfs/nfs4proc.c
··· 6372 6372 }; 6373 6373 6374 6374 static int 6375 - nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key) 6375 + nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key) 6376 6376 { 6377 6377 int ret; 6378 6378 struct cb_notify_lock_args *cbnl = key; ··· 6415 6415 .inode = state->inode, 6416 6416 .owner = &owner, 6417 6417 .notified = false }; 6418 - wait_queue_t wait; 6418 + wait_queue_entry_t wait; 6419 6419 6420 6420 /* Don't bother with waitqueue if we don't expect a callback */ 6421 6421 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
+1 -1
fs/nilfs2/segment.c
··· 2161 2161 } 2162 2162 2163 2163 struct nilfs_segctor_wait_request { 2164 - wait_queue_t wq; 2164 + wait_queue_entry_t wq; 2165 2165 __u32 seq; 2166 2166 int err; 2167 2167 atomic_t done;
+2 -2
fs/orangefs/orangefs-bufmap.c
··· 47 47 if (m->c != -1) { 48 48 for (;;) { 49 49 if (likely(list_empty(&wait.task_list))) 50 - __add_wait_queue_tail(&m->q, &wait); 50 + __add_wait_queue_entry_tail(&m->q, &wait); 51 51 set_current_state(TASK_UNINTERRUPTIBLE); 52 52 53 53 if (m->c == -1) ··· 85 85 do { 86 86 long n = left, t; 87 87 if (likely(list_empty(&wait.task_list))) 88 - __add_wait_queue_tail_exclusive(&m->q, &wait); 88 + __add_wait_queue_entry_tail_exclusive(&m->q, &wait); 89 89 set_current_state(TASK_INTERRUPTIBLE); 90 90 91 91 if (m->c > 0)
+1 -1
fs/reiserfs/journal.c
··· 2956 2956 2957 2957 static void queue_log_writer(struct super_block *s) 2958 2958 { 2959 - wait_queue_t wait; 2959 + wait_queue_entry_t wait; 2960 2960 struct reiserfs_journal *journal = SB_JOURNAL(s); 2961 2961 set_bit(J_WRITERS_QUEUED, &journal->j_state); 2962 2962
+2 -2
fs/select.c
··· 180 180 return table->entry++; 181 181 } 182 182 183 - static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) 183 + static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 184 184 { 185 185 struct poll_wqueues *pwq = wait->private; 186 186 DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task); ··· 206 206 return default_wake_function(&dummy_wait, mode, sync, key); 207 207 } 208 208 209 - static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) 209 + static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 210 210 { 211 211 struct poll_table_entry *entry; 212 212
+1 -1
fs/signalfd.c
··· 43 43 if (likely(!waitqueue_active(wqh))) 44 44 return; 45 45 46 - /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ 46 + /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */ 47 47 wake_up_poll(wqh, POLLHUP | POLLFREE); 48 48 } 49 49
+4 -4
fs/userfaultfd.c
··· 81 81 82 82 struct userfaultfd_wait_queue { 83 83 struct uffd_msg msg; 84 - wait_queue_t wq; 84 + wait_queue_entry_t wq; 85 85 struct userfaultfd_ctx *ctx; 86 86 bool waken; 87 87 }; ··· 91 91 unsigned long len; 92 92 }; 93 93 94 - static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, 94 + static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, 95 95 int wake_flags, void *key) 96 96 { 97 97 struct userfaultfd_wake_range *range = key; ··· 860 860 static inline struct userfaultfd_wait_queue *find_userfault_in( 861 861 wait_queue_head_t *wqh) 862 862 { 863 - wait_queue_t *wq; 863 + wait_queue_entry_t *wq; 864 864 struct userfaultfd_wait_queue *uwq; 865 865 866 866 VM_BUG_ON(!spin_is_locked(&wqh->lock)); ··· 1747 1747 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f) 1748 1748 { 1749 1749 struct userfaultfd_ctx *ctx = f->private_data; 1750 - wait_queue_t *wq; 1750 + wait_queue_entry_t *wq; 1751 1751 struct userfaultfd_wait_queue *uwq; 1752 1752 unsigned long pending = 0, total = 0; 1753 1753
+1 -1
include/linux/blk-mq.h
··· 33 33 struct blk_mq_ctx **ctxs; 34 34 unsigned int nr_ctx; 35 35 36 - wait_queue_t dispatch_wait; 36 + wait_queue_entry_t dispatch_wait; 37 37 atomic_t wait_index; 38 38 39 39 struct blk_mq_tags *tags;
+2 -2
include/linux/eventfd.h
··· 37 37 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); 38 38 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); 39 39 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); 40 - int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, 40 + int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, 41 41 __u64 *cnt); 42 42 43 43 #else /* CONFIG_EVENTFD */ ··· 73 73 } 74 74 75 75 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, 76 - wait_queue_t *wait, __u64 *cnt) 76 + wait_queue_entry_t *wait, __u64 *cnt) 77 77 { 78 78 return -ENOSYS; 79 79 }
+1 -1
include/linux/kvm_irqfd.h
··· 46 46 struct kvm_kernel_irqfd { 47 47 /* Used for MSI fast-path */ 48 48 struct kvm *kvm; 49 - wait_queue_t wait; 49 + wait_queue_entry_t wait; 50 50 /* Update side is protected by irqfds.lock */ 51 51 struct kvm_kernel_irq_routing_entry irq_entry; 52 52 seqcount_t irq_entry_sc;
+1 -1
include/linux/pagemap.h
··· 524 524 /* 525 525 * Add an arbitrary waiter to a page's wait queue 526 526 */ 527 - extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); 527 + extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); 528 528 529 529 /* 530 530 * Fault everything in given userspace address range in.
+1 -1
include/linux/poll.h
··· 75 75 struct poll_table_entry { 76 76 struct file *filp; 77 77 unsigned long key; 78 - wait_queue_t wait; 78 + wait_queue_entry_t wait; 79 79 wait_queue_head_t *wait_address; 80 80 }; 81 81
+1 -1
include/linux/vfio.h
··· 183 183 void (*thread)(void *, void *); 184 184 void *data; 185 185 struct work_struct inject; 186 - wait_queue_t wait; 186 + wait_queue_entry_t wait; 187 187 poll_table pt; 188 188 struct work_struct shutdown; 189 189 struct virqfd **pvirqfd;
+35 -32
include/linux/wait.h
··· 10 10 #include <asm/current.h> 11 11 #include <uapi/linux/wait.h> 12 12 13 - typedef struct __wait_queue wait_queue_t; 14 - typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); 15 - int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); 13 + typedef struct wait_queue_entry wait_queue_entry_t; 14 + typedef int (*wait_queue_func_t)(wait_queue_entry_t *wait, unsigned mode, int flags, void *key); 15 + int default_wake_function(wait_queue_entry_t *wait, unsigned mode, int flags, void *key); 16 16 17 - /* __wait_queue::flags */ 17 + /* wait_queue_entry::flags */ 18 18 #define WQ_FLAG_EXCLUSIVE 0x01 19 19 #define WQ_FLAG_WOKEN 0x02 20 20 21 - struct __wait_queue { 21 + /* 22 + * A single wait-queue entry structure: 23 + */ 24 + struct wait_queue_entry { 22 25 unsigned int flags; 23 26 void *private; 24 27 wait_queue_func_t func; ··· 37 34 38 35 struct wait_bit_queue { 39 36 struct wait_bit_key key; 40 - wait_queue_t wait; 37 + wait_queue_entry_t wait; 41 38 }; 42 39 43 40 struct __wait_queue_head { ··· 58 55 .task_list = { NULL, NULL } } 59 56 60 57 #define DECLARE_WAITQUEUE(name, tsk) \ 61 - wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk) 58 + wait_queue_entry_t name = __WAITQUEUE_INITIALIZER(name, tsk) 62 59 63 60 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ 64 61 .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ ··· 91 88 # define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name) 92 89 #endif 93 90 94 - static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) 91 + static inline void init_waitqueue_entry(wait_queue_entry_t *q, struct task_struct *p) 95 92 { 96 93 q->flags = 0; 97 94 q->private = p; ··· 99 96 } 100 97 101 98 static inline void 102 - init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) 99 + init_waitqueue_func_entry(wait_queue_entry_t *q, wait_queue_func_t func) 103 100 { 104 101 q->flags = 0; 105 102 q->private = NULL; ··· 162 
159 return waitqueue_active(wq); 163 160 } 164 161 165 - extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 166 - extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); 167 - extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 162 + extern void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait); 163 + extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait); 164 + extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait); 168 165 169 - static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) 166 + static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *new) 170 167 { 171 168 list_add(&new->task_list, &head->task_list); 172 169 } ··· 175 172 * Used for wake-one threads: 176 173 */ 177 174 static inline void 178 - __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) 175 + __add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) 179 176 { 180 177 wait->flags |= WQ_FLAG_EXCLUSIVE; 181 178 __add_wait_queue(q, wait); 182 179 } 183 180 184 - static inline void __add_wait_queue_tail(wait_queue_head_t *head, 185 - wait_queue_t *new) 181 + static inline void __add_wait_queue_entry_tail(wait_queue_head_t *head, 182 + wait_queue_entry_t *new) 186 183 { 187 184 list_add_tail(&new->task_list, &head->task_list); 188 185 } 189 186 190 187 static inline void 191 - __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait) 188 + __add_wait_queue_entry_tail_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) 192 189 { 193 190 wait->flags |= WQ_FLAG_EXCLUSIVE; 194 - __add_wait_queue_tail(q, wait); 191 + __add_wait_queue_entry_tail(q, wait); 195 192 } 196 193 197 194 static inline void 198 - __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) 195 + __remove_wait_queue(wait_queue_head_t *head, wait_queue_entry_t *old) 199 196 { 200 197 
list_del(&old->task_list); 201 198 } ··· 252 249 (!__builtin_constant_p(state) || \ 253 250 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \ 254 251 255 - extern void init_wait_entry(wait_queue_t *__wait, int flags); 252 + extern void init_wait_entry(wait_queue_entry_t *__wait, int flags); 256 253 257 254 /* 258 255 * The below macro ___wait_event() has an explicit shadow of the __ret ··· 269 266 #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ 270 267 ({ \ 271 268 __label__ __out; \ 272 - wait_queue_t __wait; \ 269 + wait_queue_entry_t __wait; \ 273 270 long __ret = ret; /* explicit shadow */ \ 274 271 \ 275 272 init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0); \ ··· 623 620 __ret; \ 624 621 }) 625 622 626 - extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *); 627 - extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *); 623 + extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *); 624 + extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *); 628 625 629 626 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ 630 627 ({ \ ··· 970 967 /* 971 968 * Waitqueues which are removed from the waitqueue_head at wakeup time 972 969 */ 973 - void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); 974 - void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); 975 - long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); 976 - void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); 977 - long wait_woken(wait_queue_t *wait, unsigned mode, long timeout); 978 - int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 979 - int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 980 - int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 970 + void prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); 
971 + void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); 972 + long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state); 973 + void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait); 974 + long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout); 975 + int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); 976 + int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); 977 + int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key); 981 978 982 979 #define DEFINE_WAIT_FUNC(name, function) \ 983 - wait_queue_t name = { \ 980 + wait_queue_entry_t name = { \ 984 981 .private = current, \ 985 982 .func = function, \ 986 983 .task_list = LIST_HEAD_INIT((name).task_list), \
+1 -1
include/net/af_unix.h
··· 62 62 #define UNIX_GC_CANDIDATE 0 63 63 #define UNIX_GC_MAYBE_CYCLE 1 64 64 struct socket_wq peer_wq; 65 - wait_queue_t peer_wake; 65 + wait_queue_entry_t peer_wake; 66 66 }; 67 67 68 68 static inline struct unix_sock *unix_sk(const struct sock *sk)
+2 -2
include/uapi/linux/auto_fs.h
··· 26 26 #define AUTOFS_MIN_PROTO_VERSION AUTOFS_PROTO_VERSION 27 27 28 28 /* 29 - * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed 29 + * The wait_queue_entry_token (autofs_wqt_t) is part of a structure which is passed 30 30 * back to the kernel via ioctl from userspace. On architectures where 32- and 31 31 * 64-bit userspace binaries can be executed it's important that the size of 32 32 * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we ··· 49 49 50 50 struct autofs_packet_missing { 51 51 struct autofs_packet_hdr hdr; 52 - autofs_wqt_t wait_queue_token; 52 + autofs_wqt_t wait_queue_entry_token; 53 53 int len; 54 54 char name[NAME_MAX+1]; 55 55 };
+2 -2
include/uapi/linux/auto_fs4.h
··· 108 108 /* v4 multi expire (via pipe) */ 109 109 struct autofs_packet_expire_multi { 110 110 struct autofs_packet_hdr hdr; 111 - autofs_wqt_t wait_queue_token; 111 + autofs_wqt_t wait_queue_entry_token; 112 112 int len; 113 113 char name[NAME_MAX+1]; 114 114 }; ··· 123 123 /* autofs v5 common packet struct */ 124 124 struct autofs_v5_packet { 125 125 struct autofs_packet_hdr hdr; 126 - autofs_wqt_t wait_queue_token; 126 + autofs_wqt_t wait_queue_entry_token; 127 127 __u32 dev; 128 128 __u64 ino; 129 129 __u32 uid;
+2 -2
kernel/exit.c
··· 1004 1004 int __user *wo_stat; 1005 1005 struct rusage __user *wo_rusage; 1006 1006 1007 - wait_queue_t child_wait; 1007 + wait_queue_entry_t child_wait; 1008 1008 int notask_error; 1009 1009 }; 1010 1010 ··· 1541 1541 return 0; 1542 1542 } 1543 1543 1544 - static int child_wait_callback(wait_queue_t *wait, unsigned mode, 1544 + static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode, 1545 1545 int sync, void *key) 1546 1546 { 1547 1547 struct wait_opts *wo = container_of(wait, struct wait_opts,
+1 -1
kernel/futex.c
··· 225 225 * @requeue_pi_key: the requeue_pi target futex key 226 226 * @bitset: bitset for the optional bitmasked wakeup 227 227 * 228 - * We use this hashed waitqueue, instead of a normal wait_queue_t, so 228 + * We use this hashed waitqueue, instead of a normal wait_queue_entry_t, so 229 229 * we can wake only the relevant ones (hashed queues may be shared). 230 230 * 231 231 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
+1 -1
kernel/sched/completion.c
··· 66 66 if (!x->done) { 67 67 DECLARE_WAITQUEUE(wait, current); 68 68 69 - __add_wait_queue_tail_exclusive(&x->wait, &wait); 69 + __add_wait_queue_entry_tail_exclusive(&x->wait, &wait); 70 70 do { 71 71 if (signal_pending_state(state, current)) { 72 72 timeout = -ERESTARTSYS;
+1 -1
kernel/sched/core.c
··· 3687 3687 exception_exit(prev_state); 3688 3688 } 3689 3689 3690 - int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, 3690 + int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 3691 3691 void *key) 3692 3692 { 3693 3693 return try_to_wake_up(curr->private, mode, wake_flags);
+21 -21
kernel/sched/wait.c
··· 21 21 22 22 EXPORT_SYMBOL(__init_waitqueue_head); 23 23 24 - void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) 24 + void add_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait) 25 25 { 26 26 unsigned long flags; 27 27 ··· 32 32 } 33 33 EXPORT_SYMBOL(add_wait_queue); 34 34 35 - void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) 35 + void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait) 36 36 { 37 37 unsigned long flags; 38 38 39 39 wait->flags |= WQ_FLAG_EXCLUSIVE; 40 40 spin_lock_irqsave(&q->lock, flags); 41 - __add_wait_queue_tail(q, wait); 41 + __add_wait_queue_entry_tail(q, wait); 42 42 spin_unlock_irqrestore(&q->lock, flags); 43 43 } 44 44 EXPORT_SYMBOL(add_wait_queue_exclusive); 45 45 46 - void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) 46 + void remove_wait_queue(wait_queue_head_t *q, wait_queue_entry_t *wait) 47 47 { 48 48 unsigned long flags; 49 49 ··· 66 66 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, 67 67 int nr_exclusive, int wake_flags, void *key) 68 68 { 69 - wait_queue_t *curr, *next; 69 + wait_queue_entry_t *curr, *next; 70 70 71 71 list_for_each_entry_safe(curr, next, &q->task_list, task_list) { 72 72 unsigned flags = curr->flags; ··· 170 170 * loads to move into the critical region). 
171 171 */ 172 172 void 173 - prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) 173 + prepare_to_wait(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) 174 174 { 175 175 unsigned long flags; 176 176 ··· 184 184 EXPORT_SYMBOL(prepare_to_wait); 185 185 186 186 void 187 - prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) 187 + prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) 188 188 { 189 189 unsigned long flags; 190 190 191 191 wait->flags |= WQ_FLAG_EXCLUSIVE; 192 192 spin_lock_irqsave(&q->lock, flags); 193 193 if (list_empty(&wait->task_list)) 194 - __add_wait_queue_tail(q, wait); 194 + __add_wait_queue_entry_tail(q, wait); 195 195 set_current_state(state); 196 196 spin_unlock_irqrestore(&q->lock, flags); 197 197 } 198 198 EXPORT_SYMBOL(prepare_to_wait_exclusive); 199 199 200 - void init_wait_entry(wait_queue_t *wait, int flags) 200 + void init_wait_entry(wait_queue_entry_t *wait, int flags) 201 201 { 202 202 wait->flags = flags; 203 203 wait->private = current; ··· 206 206 } 207 207 EXPORT_SYMBOL(init_wait_entry); 208 208 209 - long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state) 209 + long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_entry_t *wait, int state) 210 210 { 211 211 unsigned long flags; 212 212 long ret = 0; ··· 230 230 } else { 231 231 if (list_empty(&wait->task_list)) { 232 232 if (wait->flags & WQ_FLAG_EXCLUSIVE) 233 - __add_wait_queue_tail(q, wait); 233 + __add_wait_queue_entry_tail(q, wait); 234 234 else 235 235 __add_wait_queue(q, wait); 236 236 } ··· 249 249 * condition in the caller before they add the wait 250 250 * entry to the wake queue. 
251 251 */ 252 - int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait) 252 + int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait) 253 253 { 254 254 if (likely(list_empty(&wait->task_list))) 255 - __add_wait_queue_tail(wq, wait); 255 + __add_wait_queue_entry_tail(wq, wait); 256 256 257 257 set_current_state(TASK_INTERRUPTIBLE); 258 258 if (signal_pending(current)) ··· 265 265 } 266 266 EXPORT_SYMBOL(do_wait_intr); 267 267 268 - int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait) 268 + int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait) 269 269 { 270 270 if (likely(list_empty(&wait->task_list))) 271 - __add_wait_queue_tail(wq, wait); 271 + __add_wait_queue_entry_tail(wq, wait); 272 272 273 273 set_current_state(TASK_INTERRUPTIBLE); 274 274 if (signal_pending(current)) ··· 290 290 * the wait descriptor from the given waitqueue if still 291 291 * queued. 292 292 */ 293 - void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) 293 + void finish_wait(wait_queue_head_t *q, wait_queue_entry_t *wait) 294 294 { 295 295 unsigned long flags; 296 296 ··· 316 316 } 317 317 EXPORT_SYMBOL(finish_wait); 318 318 319 - int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 319 + int autoremove_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 320 320 { 321 321 int ret = default_wake_function(wait, mode, sync, key); 322 322 ··· 351 351 * remove_wait_queue(&wq, &wait); 352 352 * 353 353 */ 354 - long wait_woken(wait_queue_t *wait, unsigned mode, long timeout) 354 + long wait_woken(wait_queue_entry_t *wait, unsigned mode, long timeout) 355 355 { 356 356 set_current_state(mode); /* A */ 357 357 /* ··· 375 375 } 376 376 EXPORT_SYMBOL(wait_woken); 377 377 378 - int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 378 + int woken_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 379 379 { 380 380 /* 381 381 * Although this function 
is called under waitqueue lock, LOCK ··· 391 391 } 392 392 EXPORT_SYMBOL(woken_wake_function); 393 393 394 - int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) 394 + int wake_bit_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) 395 395 { 396 396 struct wait_bit_key *key = arg; 397 397 struct wait_bit_queue *wait_bit ··· 534 534 return bit_waitqueue(p, 0); 535 535 } 536 536 537 - static int wake_atomic_t_function(wait_queue_t *wait, unsigned mode, int sync, 537 + static int wake_atomic_t_function(wait_queue_entry_t *wait, unsigned mode, int sync, 538 538 void *arg) 539 539 { 540 540 struct wait_bit_key *key = arg;
+2 -2
kernel/workqueue.c
··· 2864 2864 EXPORT_SYMBOL_GPL(flush_work); 2865 2865 2866 2866 struct cwt_wait { 2867 - wait_queue_t wait; 2867 + wait_queue_entry_t wait; 2868 2868 struct work_struct *work; 2869 2869 }; 2870 2870 2871 - static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key) 2871 + static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 2872 2872 { 2873 2873 struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); 2874 2874
+5 -5
mm/filemap.c
··· 768 768 struct wait_page_queue { 769 769 struct page *page; 770 770 int bit_nr; 771 - wait_queue_t wait; 771 + wait_queue_entry_t wait; 772 772 }; 773 773 774 - static int wake_page_function(wait_queue_t *wait, unsigned mode, int sync, void *arg) 774 + static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg) 775 775 { 776 776 struct wait_page_key *key = arg; 777 777 struct wait_page_queue *wait_page ··· 834 834 struct page *page, int bit_nr, int state, bool lock) 835 835 { 836 836 struct wait_page_queue wait_page; 837 - wait_queue_t *wait = &wait_page.wait; 837 + wait_queue_entry_t *wait = &wait_page.wait; 838 838 int ret = 0; 839 839 840 840 init_wait(wait); ··· 847 847 848 848 if (likely(list_empty(&wait->task_list))) { 849 849 if (lock) 850 - __add_wait_queue_tail_exclusive(q, wait); 850 + __add_wait_queue_entry_tail_exclusive(q, wait); 851 851 else 852 852 __add_wait_queue(q, wait); 853 853 SetPageWaiters(page); ··· 907 907 * 908 908 * Add an arbitrary @waiter to the wait queue for the nominated @page. 909 909 */ 910 - void add_page_wait_queue(struct page *page, wait_queue_t *waiter) 910 + void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) 911 911 { 912 912 wait_queue_head_t *q = page_waitqueue(page); 913 913 unsigned long flags;
+4 -4
mm/memcontrol.c
··· 170 170 */ 171 171 poll_table pt; 172 172 wait_queue_head_t *wqh; 173 - wait_queue_t wait; 173 + wait_queue_entry_t wait; 174 174 struct work_struct remove; 175 175 }; 176 176 ··· 1479 1479 1480 1480 struct oom_wait_info { 1481 1481 struct mem_cgroup *memcg; 1482 - wait_queue_t wait; 1482 + wait_queue_entry_t wait; 1483 1483 }; 1484 1484 1485 - static int memcg_oom_wake_function(wait_queue_t *wait, 1485 + static int memcg_oom_wake_function(wait_queue_entry_t *wait, 1486 1486 unsigned mode, int sync, void *arg) 1487 1487 { 1488 1488 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; ··· 3725 3725 * 3726 3726 * Called with wqh->lock held and interrupts disabled. 3727 3727 */ 3728 - static int memcg_event_wake(wait_queue_t *wait, unsigned mode, 3728 + static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, 3729 3729 int sync, void *key) 3730 3730 { 3731 3731 struct mem_cgroup_event *event =
+1 -1
mm/mempool.c
··· 312 312 { 313 313 void *element; 314 314 unsigned long flags; 315 - wait_queue_t wait; 315 + wait_queue_entry_t wait; 316 316 gfp_t gfp_temp; 317 317 318 318 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
+1 -1
mm/shmem.c
··· 1902 1902 * entry unconditionally - even if something else had already woken the 1903 1903 * target. 1904 1904 */ 1905 - static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 1905 + static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 1906 1906 { 1907 1907 int ret = default_wake_function(wait, mode, sync, key); 1908 1908 list_del_init(&wait->task_list);
+2 -2
net/9p/trans_fd.c
··· 95 95 96 96 struct p9_poll_wait { 97 97 struct p9_conn *conn; 98 - wait_queue_t wait; 98 + wait_queue_entry_t wait; 99 99 wait_queue_head_t *wait_addr; 100 100 }; 101 101 ··· 522 522 clear_bit(Wworksched, &m->wsched); 523 523 } 524 524 525 - static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) 525 + static int p9_pollwake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) 526 526 { 527 527 struct p9_poll_wait *pwait = 528 528 container_of(wait, struct p9_poll_wait, wait);
+1 -1
net/bluetooth/bnep/core.c
··· 484 484 struct net_device *dev = s->dev; 485 485 struct sock *sk = s->sock->sk; 486 486 struct sk_buff *skb; 487 - wait_queue_t wait; 487 + wait_queue_entry_t wait; 488 488 489 489 BT_DBG(""); 490 490
+1 -1
net/bluetooth/cmtp/core.c
··· 280 280 struct cmtp_session *session = arg; 281 281 struct sock *sk = session->sock->sk; 282 282 struct sk_buff *skb; 283 - wait_queue_t wait; 283 + wait_queue_entry_t wait; 284 284 285 285 BT_DBG("session %p", session); 286 286
+1 -1
net/bluetooth/hidp/core.c
··· 1244 1244 static int hidp_session_thread(void *arg) 1245 1245 { 1246 1246 struct hidp_session *session = arg; 1247 - wait_queue_t ctrl_wait, intr_wait; 1247 + wait_queue_entry_t ctrl_wait, intr_wait; 1248 1248 1249 1249 BT_DBG("session %p", session); 1250 1250
+1 -1
net/core/datagram.c
··· 68 68 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; 69 69 } 70 70 71 - static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync, 71 + static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync, 72 72 void *key) 73 73 { 74 74 unsigned long bits = (unsigned long)key;
+2 -2
net/unix/af_unix.c
··· 343 343 * are still connected to it and there's no way to inform "a polling 344 344 * implementation" that it should let go of a certain wait queue 345 345 * 346 - * In order to propagate a wake up, a wait_queue_t of the client 346 + * In order to propagate a wake up, a wait_queue_entry_t of the client 347 347 * socket is enqueued on the peer_wait queue of the server socket 348 348 * whose wake function does a wake_up on the ordinary client socket 349 349 * wait queue. This connection is established whenever a write (or ··· 352 352 * was relayed. 353 353 */ 354 354 355 - static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags, 355 + static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, 356 356 void *key) 357 357 { 358 358 struct unix_sock *u;
+1 -1
sound/core/control.c
··· 1577 1577 struct snd_ctl_event ev; 1578 1578 struct snd_kctl_event *kev; 1579 1579 while (list_empty(&ctl->events)) { 1580 - wait_queue_t wait; 1580 + wait_queue_entry_t wait; 1581 1581 if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { 1582 1582 err = -EAGAIN; 1583 1583 goto __end_lock;
+1 -1
sound/core/hwdep.c
··· 85 85 int major = imajor(inode); 86 86 struct snd_hwdep *hw; 87 87 int err; 88 - wait_queue_t wait; 88 + wait_queue_entry_t wait; 89 89 90 90 if (major == snd_major) { 91 91 hw = snd_lookup_minor_data(iminor(inode),
+1 -1
sound/core/init.c
··· 989 989 */ 990 990 int snd_power_wait(struct snd_card *card, unsigned int power_state) 991 991 { 992 - wait_queue_t wait; 992 + wait_queue_entry_t wait; 993 993 int result = 0; 994 994 995 995 /* fastpath */
+2 -2
sound/core/oss/pcm_oss.c
··· 1554 1554 ssize_t result = 0; 1555 1555 snd_pcm_state_t state; 1556 1556 long res; 1557 - wait_queue_t wait; 1557 + wait_queue_entry_t wait; 1558 1558 1559 1559 runtime = substream->runtime; 1560 1560 init_waitqueue_entry(&wait, current); ··· 2387 2387 struct snd_pcm_oss_file *pcm_oss_file; 2388 2388 struct snd_pcm_oss_setup setup[2]; 2389 2389 int nonblock; 2390 - wait_queue_t wait; 2390 + wait_queue_entry_t wait; 2391 2391 2392 2392 err = nonseekable_open(inode, file); 2393 2393 if (err < 0)
+1 -1
sound/core/pcm_lib.c
··· 1904 1904 { 1905 1905 struct snd_pcm_runtime *runtime = substream->runtime; 1906 1906 int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; 1907 - wait_queue_t wait; 1907 + wait_queue_entry_t wait; 1908 1908 int err = 0; 1909 1909 snd_pcm_uframes_t avail = 0; 1910 1910 long wait_time, tout;
+2 -2
sound/core/pcm_native.c
··· 1652 1652 struct snd_card *card; 1653 1653 struct snd_pcm_runtime *runtime; 1654 1654 struct snd_pcm_substream *s; 1655 - wait_queue_t wait; 1655 + wait_queue_entry_t wait; 1656 1656 int result = 0; 1657 1657 int nonblock = 0; 1658 1658 ··· 2353 2353 static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream) 2354 2354 { 2355 2355 int err; 2356 - wait_queue_t wait; 2356 + wait_queue_entry_t wait; 2357 2357 2358 2358 if (pcm == NULL) { 2359 2359 err = -ENODEV;
+4 -4
sound/core/rawmidi.c
··· 368 368 int err; 369 369 struct snd_rawmidi *rmidi; 370 370 struct snd_rawmidi_file *rawmidi_file = NULL; 371 - wait_queue_t wait; 371 + wait_queue_entry_t wait; 372 372 373 373 if ((file->f_flags & O_APPEND) && !(file->f_flags & O_NONBLOCK)) 374 374 return -EINVAL; /* invalid combination */ ··· 1002 1002 while (count > 0) { 1003 1003 spin_lock_irq(&runtime->lock); 1004 1004 while (!snd_rawmidi_ready(substream)) { 1005 - wait_queue_t wait; 1005 + wait_queue_entry_t wait; 1006 1006 if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { 1007 1007 spin_unlock_irq(&runtime->lock); 1008 1008 return result > 0 ? result : -EAGAIN; ··· 1306 1306 while (count > 0) { 1307 1307 spin_lock_irq(&runtime->lock); 1308 1308 while (!snd_rawmidi_ready_append(substream, count)) { 1309 - wait_queue_t wait; 1309 + wait_queue_entry_t wait; 1310 1310 if (file->f_flags & O_NONBLOCK) { 1311 1311 spin_unlock_irq(&runtime->lock); 1312 1312 return result > 0 ? result : -EAGAIN; ··· 1338 1338 if (file->f_flags & O_DSYNC) { 1339 1339 spin_lock_irq(&runtime->lock); 1340 1340 while (runtime->avail != runtime->buffer_size) { 1341 - wait_queue_t wait; 1341 + wait_queue_entry_t wait; 1342 1342 unsigned int last_avail = runtime->avail; 1343 1343 init_waitqueue_entry(&wait, current); 1344 1344 add_wait_queue(&runtime->sleep, &wait);
+1 -1
sound/core/seq/seq_fifo.c
··· 179 179 { 180 180 struct snd_seq_event_cell *cell; 181 181 unsigned long flags; 182 - wait_queue_t wait; 182 + wait_queue_entry_t wait; 183 183 184 184 if (snd_BUG_ON(!f)) 185 185 return -EINVAL;
+1 -1
sound/core/seq/seq_memory.c
··· 227 227 struct snd_seq_event_cell *cell; 228 228 unsigned long flags; 229 229 int err = -EAGAIN; 230 - wait_queue_t wait; 230 + wait_queue_entry_t wait; 231 231 232 232 if (pool == NULL) 233 233 return -EINVAL;
+1 -1
sound/core/timer.c
··· 1964 1964 spin_lock_irq(&tu->qlock); 1965 1965 while ((long)count - result >= unit) { 1966 1966 while (!tu->qused) { 1967 - wait_queue_t wait; 1967 + wait_queue_entry_t wait; 1968 1968 1969 1969 if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { 1970 1970 err = -EAGAIN;
+1 -1
sound/isa/wavefront/wavefront_synth.c
··· 1782 1782 int val, int port, unsigned long timeout) 1783 1783 1784 1784 { 1785 - wait_queue_t wait; 1785 + wait_queue_entry_t wait; 1786 1786 1787 1787 init_waitqueue_entry(&wait, current); 1788 1788 spin_lock_irq(&dev->irq_lock);
+2 -2
sound/pci/mixart/mixart_core.c
··· 239 239 struct mixart_msg resp; 240 240 u32 msg_frame = 0; /* set to 0, so it's no notification to wait for, but the answer */ 241 241 int err; 242 - wait_queue_t wait; 242 + wait_queue_entry_t wait; 243 243 long timeout; 244 244 245 245 init_waitqueue_entry(&wait, current); ··· 284 284 struct mixart_msg *request, u32 notif_event) 285 285 { 286 286 int err; 287 - wait_queue_t wait; 287 + wait_queue_entry_t wait; 288 288 long timeout; 289 289 290 290 if (snd_BUG_ON(!notif_event))
+1 -1
sound/pci/ymfpci/ymfpci_main.c
··· 781 781 782 782 static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip) 783 783 { 784 - wait_queue_t wait; 784 + wait_queue_entry_t wait; 785 785 int loops = 4; 786 786 787 787 while (loops-- > 0) {
+1 -1
virt/kvm/eventfd.c
··· 184 184 * Called with wqh->lock held and interrupts disabled 185 185 */ 186 186 static int 187 - irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) 187 + irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 188 188 { 189 189 struct kvm_kernel_irqfd *irqfd = 190 190 container_of(wait, struct kvm_kernel_irqfd, wait);