Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sched-wait-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull wait_var_event updates from Ingo Molnar:
"This introduces the new wait_var_event() API, which is a more flexible
waiting primitive than wait_on_atomic_t().

All wait_on_atomic_t() users are migrated over to the new API and
wait_on_atomic_t() is removed. The migration fixes one bug and should
result in no functional changes for the other use cases"

* 'sched-wait-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/wait: Improve __var_waitqueue() code generation
sched/wait: Remove the wait_on_atomic_t() API
sched/wait, arch/mips: Fix and convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, fs/ocfs2: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, fs/nfs: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, fs/fscache: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, fs/btrfs: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, fs/afs: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, drivers/media: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait, drivers/drm: Convert wait_on_atomic_t() usage to the new wait_var_event() API
sched/wait: Introduce wait_var_event()

+147 -170
+2
arch/mips/kernel/process.c
··· 781 781 atomic_set(&task->mm->context.fp_mode_switching, 0); 782 782 preempt_enable(); 783 783 784 + wake_up_var(&task->mm->context.fp_mode_switching); 785 + 784 786 return 0; 785 787 } 786 788
+2 -2
arch/mips/kernel/traps.c
··· 1248 1248 * If an FP mode switch is currently underway, wait for it to 1249 1249 * complete before proceeding. 1250 1250 */ 1251 - wait_on_atomic_t(&current->mm->context.fp_mode_switching, 1252 - atomic_t_wait, TASK_KILLABLE); 1251 + wait_var_event(&current->mm->context.fp_mode_switching, 1252 + !atomic_read(&current->mm->context.fp_mode_switching)); 1253 1253 1254 1254 if (!used_math()) { 1255 1255 /* First time FP context user. */
+7 -6
drivers/gpu/drm/drm_dp_aux_dev.c
··· 177 177 res = pos - iocb->ki_pos; 178 178 iocb->ki_pos = pos; 179 179 180 - atomic_dec(&aux_dev->usecount); 181 - wake_up_atomic_t(&aux_dev->usecount); 180 + if (atomic_dec_and_test(&aux_dev->usecount)) 181 + wake_up_var(&aux_dev->usecount); 182 + 182 183 return res; 183 184 } 184 185 ··· 219 218 res = pos - iocb->ki_pos; 220 219 iocb->ki_pos = pos; 221 220 222 - atomic_dec(&aux_dev->usecount); 223 - wake_up_atomic_t(&aux_dev->usecount); 221 + if (atomic_dec_and_test(&aux_dev->usecount)) 222 + wake_up_var(&aux_dev->usecount); 223 + 224 224 return res; 225 225 } 226 226 ··· 279 277 mutex_unlock(&aux_idr_mutex); 280 278 281 279 atomic_dec(&aux_dev->usecount); 282 - wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait, 283 - TASK_UNINTERRUPTIBLE); 280 + wait_var_event(&aux_dev->usecount, !atomic_read(&aux_dev->usecount)); 284 281 285 282 minor = aux_dev->index; 286 283 if (aux_dev->dev)
+4 -10
drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
··· 271 271 u32 seqno; 272 272 }; 273 273 274 - static int wait_atomic_timeout(atomic_t *p, unsigned int mode) 275 - { 276 - return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT; 277 - } 278 - 279 274 static bool wait_for_ready(struct igt_wakeup *w) 280 275 { 281 276 DEFINE_WAIT(ready); 282 277 283 278 set_bit(IDLE, &w->flags); 284 279 if (atomic_dec_and_test(w->done)) 285 - wake_up_atomic_t(w->done); 280 + wake_up_var(w->done); 286 281 287 282 if (test_bit(STOP, &w->flags)) 288 283 goto out; ··· 294 299 out: 295 300 clear_bit(IDLE, &w->flags); 296 301 if (atomic_dec_and_test(w->set)) 297 - wake_up_atomic_t(w->set); 302 + wake_up_var(w->set); 298 303 299 304 return !test_bit(STOP, &w->flags); 300 305 } ··· 337 342 atomic_set(ready, 0); 338 343 wake_up_all(wq); 339 344 340 - wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE); 345 + wait_var_event(set, !atomic_read(set)); 341 346 atomic_set(ready, count); 342 347 atomic_set(done, count); 343 348 } ··· 345 350 static int igt_wakeup(void *arg) 346 351 { 347 352 I915_RND_STATE(prng); 348 - const int state = TASK_UNINTERRUPTIBLE; 349 353 struct intel_engine_cs *engine = arg; 350 354 struct igt_wakeup *waiters; 351 355 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); ··· 412 418 * that they are ready for the next test. We wait until all 413 419 * threads are complete and waiting for us (i.e. not a seqno). 414 420 */ 415 - err = wait_on_atomic_t(&done, wait_atomic_timeout, state); 421 + err = wait_var_event_timeout(&done, !atomic_read(&done), 10 * HZ); 416 422 if (err) { 417 423 pr_err("Timed out waiting for %d remaining waiters\n", 418 424 atomic_read(&done));
+4 -4
drivers/media/platform/qcom/venus/hfi.c
··· 106 106 107 107 if (!empty) { 108 108 mutex_unlock(&core->lock); 109 - wait_on_atomic_t(&core->insts_count, atomic_t_wait, 110 - TASK_UNINTERRUPTIBLE); 109 + wait_var_event(&core->insts_count, 110 + !atomic_read(&core->insts_count)); 111 111 mutex_lock(&core->lock); 112 112 } 113 113 ··· 229 229 230 230 mutex_lock(&core->lock); 231 231 list_del_init(&inst->list); 232 - atomic_dec(&core->insts_count); 233 - wake_up_atomic_t(&core->insts_count); 232 + if (atomic_dec_and_test(&core->insts_count)) 233 + wake_up_var(&core->insts_count); 234 234 mutex_unlock(&core->lock); 235 235 } 236 236 EXPORT_SYMBOL_GPL(hfi_session_destroy);
+3 -3
fs/afs/cell.c
··· 25 25 static void afs_dec_cells_outstanding(struct afs_net *net) 26 26 { 27 27 if (atomic_dec_and_test(&net->cells_outstanding)) 28 - wake_up_atomic_t(&net->cells_outstanding); 28 + wake_up_var(&net->cells_outstanding); 29 29 } 30 30 31 31 /* ··· 764 764 afs_queue_cell_manager(net); 765 765 766 766 _debug("wait"); 767 - wait_on_atomic_t(&net->cells_outstanding, atomic_t_wait, 768 - TASK_UNINTERRUPTIBLE); 767 + wait_var_event(&net->cells_outstanding, 768 + !atomic_read(&net->cells_outstanding)); 769 769 _leave(""); 770 770 }
+3 -3
fs/afs/rxrpc.c
··· 103 103 } 104 104 105 105 _debug("outstanding %u", atomic_read(&net->nr_outstanding_calls)); 106 - wait_on_atomic_t(&net->nr_outstanding_calls, atomic_t_wait, 107 - TASK_UNINTERRUPTIBLE); 106 + wait_var_event(&net->nr_outstanding_calls, 107 + !atomic_read(&net->nr_outstanding_calls)); 108 108 _debug("no outstanding calls"); 109 109 110 110 kernel_sock_shutdown(net->socket, SHUT_RDWR); ··· 175 175 trace_afs_call(call, afs_call_trace_free, 0, o, 176 176 __builtin_return_address(0)); 177 177 if (o == 0) 178 - wake_up_atomic_t(&net->nr_outstanding_calls); 178 + wake_up_var(&net->nr_outstanding_calls); 179 179 } 180 180 } 181 181
+3 -3
fs/afs/server.c
··· 25 25 static void afs_dec_servers_outstanding(struct afs_net *net) 26 26 { 27 27 if (atomic_dec_and_test(&net->servers_outstanding)) 28 - wake_up_atomic_t(&net->servers_outstanding); 28 + wake_up_var(&net->servers_outstanding); 29 29 } 30 30 31 31 /* ··· 521 521 afs_queue_server_manager(net); 522 522 523 523 _debug("wait"); 524 - wait_on_atomic_t(&net->servers_outstanding, atomic_t_wait, 525 - TASK_UNINTERRUPTIBLE); 524 + wait_var_event(&net->servers_outstanding, 525 + !atomic_read(&net->servers_outstanding)); 526 526 _leave(""); 527 527 } 528 528
+6 -8
fs/btrfs/extent-tree.c
··· 3990 3990 bg = btrfs_lookup_block_group(fs_info, bytenr); 3991 3991 ASSERT(bg); 3992 3992 if (atomic_dec_and_test(&bg->nocow_writers)) 3993 - wake_up_atomic_t(&bg->nocow_writers); 3993 + wake_up_var(&bg->nocow_writers); 3994 3994 /* 3995 3995 * Once for our lookup and once for the lookup done by a previous call 3996 3996 * to btrfs_inc_nocow_writers() ··· 4001 4001 4002 4002 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) 4003 4003 { 4004 - wait_on_atomic_t(&bg->nocow_writers, atomic_t_wait, 4005 - TASK_UNINTERRUPTIBLE); 4004 + wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers)); 4006 4005 } 4007 4006 4008 4007 static const char *alloc_name(u64 flags) ··· 6525 6526 bg = btrfs_lookup_block_group(fs_info, start); 6526 6527 ASSERT(bg); 6527 6528 if (atomic_dec_and_test(&bg->reservations)) 6528 - wake_up_atomic_t(&bg->reservations); 6529 + wake_up_var(&bg->reservations); 6529 6530 btrfs_put_block_group(bg); 6530 6531 } 6531 6532 ··· 6551 6552 down_write(&space_info->groups_sem); 6552 6553 up_write(&space_info->groups_sem); 6553 6554 6554 - wait_on_atomic_t(&bg->reservations, atomic_t_wait, 6555 - TASK_UNINTERRUPTIBLE); 6555 + wait_var_event(&bg->reservations, !atomic_read(&bg->reservations)); 6556 6556 } 6557 6557 6558 6558 /** ··· 11059 11061 ret = btrfs_start_write_no_snapshotting(root); 11060 11062 if (ret) 11061 11063 break; 11062 - wait_on_atomic_t(&root->will_be_snapshotted, atomic_t_wait, 11063 - TASK_UNINTERRUPTIBLE); 11064 + wait_var_event(&root->will_be_snapshotted, 11065 + !atomic_read(&root->will_be_snapshotted)); 11064 11066 } 11065 11067 }
+1 -1
fs/btrfs/ioctl.c
··· 723 723 btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv); 724 724 dec_and_free: 725 725 if (atomic_dec_and_test(&root->will_be_snapshotted)) 726 - wake_up_atomic_t(&root->will_be_snapshotted); 726 + wake_up_var(&root->will_be_snapshotted); 727 727 free_pending: 728 728 kfree(pending_snapshot->root_item); 729 729 btrfs_free_path(pending_snapshot->path);
+4 -3
fs/fscache/cookie.c
··· 557 557 * n_active reaches 0). This makes sure outstanding reads and writes 558 558 * have completed. 559 559 */ 560 - if (!atomic_dec_and_test(&cookie->n_active)) 561 - wait_on_atomic_t(&cookie->n_active, atomic_t_wait, 562 - TASK_UNINTERRUPTIBLE); 560 + if (!atomic_dec_and_test(&cookie->n_active)) { 561 + wait_var_event(&cookie->n_active, 562 + !atomic_read(&cookie->n_active)); 563 + } 563 564 564 565 /* Make sure any pending writes are cancelled. */ 565 566 if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
-5
fs/nfs/inode.c
··· 85 85 } 86 86 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); 87 87 88 - int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode) 89 - { 90 - return nfs_wait_killable(mode); 91 - } 92 - 93 88 /** 94 89 * nfs_compat_user_ino64 - returns the user-visible inode number 95 90 * @fileid: 64-bit fileid
+3 -3
fs/nfs/pagelist.c
··· 98 98 int 99 99 nfs_iocounter_wait(struct nfs_lock_context *l_ctx) 100 100 { 101 - return wait_on_atomic_t(&l_ctx->io_count, nfs_wait_atomic_killable, 102 - TASK_KILLABLE); 101 + return wait_var_event_killable(&l_ctx->io_count, 102 + !atomic_read(&l_ctx->io_count)); 103 103 } 104 104 105 105 /** ··· 395 395 } 396 396 if (l_ctx != NULL) { 397 397 if (atomic_dec_and_test(&l_ctx->io_count)) { 398 - wake_up_atomic_t(&l_ctx->io_count); 398 + wake_up_var(&l_ctx->io_count); 399 399 if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags)) 400 400 rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq); 401 401 }
+1 -1
fs/nfs/pnfs_nfs.c
··· 245 245 { 246 246 if (list_empty(pages)) { 247 247 if (atomic_dec_and_test(&cinfo->mds->rpcs_out)) 248 - wake_up_atomic_t(&cinfo->mds->rpcs_out); 248 + wake_up_var(&cinfo->mds->rpcs_out); 249 249 /* don't call nfs_commitdata_release - it tries to put 250 250 * the open_context which is not acquired until nfs_init_commit 251 251 * which has not been called on @data */
+3 -3
fs/nfs/write.c
··· 1620 1620 1621 1621 static int wait_on_commit(struct nfs_mds_commit_info *cinfo) 1622 1622 { 1623 - return wait_on_atomic_t(&cinfo->rpcs_out, 1624 - nfs_wait_atomic_killable, TASK_KILLABLE); 1623 + return wait_var_event_killable(&cinfo->rpcs_out, 1624 + !atomic_read(&cinfo->rpcs_out)); 1625 1625 } 1626 1626 1627 1627 static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) ··· 1632 1632 static void nfs_commit_end(struct nfs_mds_commit_info *cinfo) 1633 1633 { 1634 1634 if (atomic_dec_and_test(&cinfo->rpcs_out)) 1635 - wake_up_atomic_t(&cinfo->rpcs_out); 1635 + wake_up_var(&cinfo->rpcs_out); 1636 1636 } 1637 1637 1638 1638 void nfs_commitdata_release(struct nfs_commit_data *data)
+5 -4
fs/ocfs2/filecheck.c
··· 134 134 { 135 135 struct ocfs2_filecheck_entry *p; 136 136 137 - if (!atomic_dec_and_test(&entry->fs_count)) 138 - wait_on_atomic_t(&entry->fs_count, atomic_t_wait, 139 - TASK_UNINTERRUPTIBLE); 137 + if (!atomic_dec_and_test(&entry->fs_count)) { 138 + wait_var_event(&entry->fs_count, 139 + !atomic_read(&entry->fs_count)); 140 + } 140 141 141 142 spin_lock(&entry->fs_fcheck->fc_lock); 142 143 while (!list_empty(&entry->fs_fcheck->fc_head)) { ··· 184 183 ocfs2_filecheck_sysfs_put(struct ocfs2_filecheck_sysfs_entry *entry) 185 184 { 186 185 if (atomic_dec_and_test(&entry->fs_count)) 187 - wake_up_atomic_t(&entry->fs_count); 186 + wake_up_var(&entry->fs_count); 188 187 } 189 188 190 189 static struct ocfs2_filecheck_sysfs_entry *
+1 -1
include/linux/fscache-cache.h
··· 496 496 497 497 static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie) 498 498 { 499 - wake_up_atomic_t(&cookie->n_active); 499 + wake_up_var(&cookie->n_active); 500 500 } 501 501 502 502 /**
+69 -26
include/linux/wait_bit.h
··· 10 10 struct wait_bit_key { 11 11 void *flags; 12 12 int bit_nr; 13 - #define WAIT_ATOMIC_T_BIT_NR -1 14 13 unsigned long timeout; 15 14 }; 16 15 ··· 21 22 #define __WAIT_BIT_KEY_INITIALIZER(word, bit) \ 22 23 { .flags = word, .bit_nr = bit, } 23 24 24 - #define __WAIT_ATOMIC_T_KEY_INITIALIZER(p) \ 25 - { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, } 26 - 27 25 typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); 28 - typedef int wait_atomic_t_action_f(atomic_t *counter, unsigned int mode); 29 26 30 27 void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); 31 28 int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); 32 29 int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); 33 30 void wake_up_bit(void *word, int bit); 34 - void wake_up_atomic_t(atomic_t *p); 35 31 int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); 36 32 int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); 37 33 int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); 38 - int out_of_line_wait_on_atomic_t(atomic_t *p, wait_atomic_t_action_f action, unsigned int mode); 39 34 struct wait_queue_head *bit_waitqueue(void *word, int bit); 40 35 extern void __init wait_bit_init(void); 41 36 ··· 50 57 extern int bit_wait_io(struct wait_bit_key *key, int mode); 51 58 extern int bit_wait_timeout(struct wait_bit_key *key, int mode); 52 59 extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); 53 - extern int atomic_t_wait(atomic_t *counter, unsigned int mode); 54 60 55 61 /** 56 62 * wait_on_bit - wait for a bit to be cleared ··· 235 243 return out_of_line_wait_on_bit_lock(word, bit, action, mode); 236 244 } 237 245 238 - /** 239 - * 
wait_on_atomic_t - Wait for an atomic_t to become 0 240 - * @val: The atomic value being waited on, a kernel virtual address 241 - * @action: the function used to sleep, which may take special actions 242 - * @mode: the task state to sleep in 243 - * 244 - * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for 245 - * the purpose of getting a waitqueue, but we set the key to a bit number 246 - * outside of the target 'word'. 247 - */ 248 - static inline 249 - int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode) 250 - { 251 - might_sleep(); 252 - if (atomic_read(val) == 0) 253 - return 0; 254 - return out_of_line_wait_on_atomic_t(val, action, mode); 255 - } 246 + extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags); 247 + extern void wake_up_var(void *var); 248 + extern wait_queue_head_t *__var_waitqueue(void *p); 249 + 250 + #define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \ 251 + ({ \ 252 + __label__ __out; \ 253 + struct wait_queue_head *__wq_head = __var_waitqueue(var); \ 254 + struct wait_bit_queue_entry __wbq_entry; \ 255 + long __ret = ret; /* explicit shadow */ \ 256 + \ 257 + init_wait_var_entry(&__wbq_entry, var, \ 258 + exclusive ? 
WQ_FLAG_EXCLUSIVE : 0); \ 259 + for (;;) { \ 260 + long __int = prepare_to_wait_event(__wq_head, \ 261 + &__wbq_entry.wq_entry, \ 262 + state); \ 263 + if (condition) \ 264 + break; \ 265 + \ 266 + if (___wait_is_interruptible(state) && __int) { \ 267 + __ret = __int; \ 268 + goto __out; \ 269 + } \ 270 + \ 271 + cmd; \ 272 + } \ 273 + finish_wait(__wq_head, &__wbq_entry.wq_entry); \ 274 + __out: __ret; \ 275 + }) 276 + 277 + #define __wait_var_event(var, condition) \ 278 + ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 279 + schedule()) 280 + 281 + #define wait_var_event(var, condition) \ 282 + do { \ 283 + might_sleep(); \ 284 + if (condition) \ 285 + break; \ 286 + __wait_var_event(var, condition); \ 287 + } while (0) 288 + 289 + #define __wait_var_event_killable(var, condition) \ 290 + ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ 291 + schedule()) 292 + 293 + #define wait_var_event_killable(var, condition) \ 294 + ({ \ 295 + int __ret = 0; \ 296 + might_sleep(); \ 297 + if (!(condition)) \ 298 + __ret = __wait_var_event_killable(var, condition); \ 299 + __ret; \ 300 + }) 301 + 302 + #define __wait_var_event_timeout(var, condition, timeout) \ 303 + ___wait_var_event(var, ___wait_cond_timeout(condition), \ 304 + TASK_UNINTERRUPTIBLE, 0, timeout, \ 305 + __ret = schedule_timeout(__ret)) 306 + 307 + #define wait_var_event_timeout(var, condition, timeout) \ 308 + ({ \ 309 + long __ret = timeout; \ 310 + might_sleep(); \ 311 + if (!___wait_cond_timeout(condition)) \ 312 + __ret = __wait_var_event_timeout(var, condition, timeout); \ 313 + __ret; \ 314 + }) 256 315 257 316 #endif /* _LINUX_WAIT_BIT_H */
+26 -84
kernel/sched/wait_bit.c
··· 149 149 } 150 150 EXPORT_SYMBOL(wake_up_bit); 151 151 152 - /* 153 - * Manipulate the atomic_t address to produce a better bit waitqueue table hash 154 - * index (we're keying off bit -1, but that would produce a horrible hash 155 - * value). 156 - */ 157 - static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p) 152 + wait_queue_head_t *__var_waitqueue(void *p) 158 153 { 159 - if (BITS_PER_LONG == 64) { 160 - unsigned long q = (unsigned long)p; 161 - 162 - return bit_waitqueue((void *)(q & ~1), q & 1); 163 - } 164 - return bit_waitqueue(p, 0); 154 + return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS); 165 155 } 156 + EXPORT_SYMBOL(__var_waitqueue); 166 157 167 - static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, 168 - void *arg) 158 + static int 159 + var_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode, 160 + int sync, void *arg) 169 161 { 170 162 struct wait_bit_key *key = arg; 171 - struct wait_bit_queue_entry *wait_bit = container_of(wq_entry, struct wait_bit_queue_entry, wq_entry); 172 - atomic_t *val = key->flags; 163 + struct wait_bit_queue_entry *wbq_entry = 164 + container_of(wq_entry, struct wait_bit_queue_entry, wq_entry); 173 165 174 - if (wait_bit->key.flags != key->flags || 175 - wait_bit->key.bit_nr != key->bit_nr || 176 - atomic_read(val) != 0) 166 + if (wbq_entry->key.flags != key->flags || 167 + wbq_entry->key.bit_nr != key->bit_nr) 177 168 return 0; 178 169 179 170 return autoremove_wake_function(wq_entry, mode, sync, key); 180 171 } 181 172 182 - /* 183 - * To allow interruptible waiting and asynchronous (i.e. nonblocking) waiting, 184 - * the actions of __wait_on_atomic_t() are permitted return codes. Nonzero 185 - * return codes halt waiting and return. 
186 - */ 187 - static __sched 188 - int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, 189 - wait_atomic_t_action_f action, unsigned int mode) 173 + void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags) 190 174 { 191 - atomic_t *val; 192 - int ret = 0; 193 - 194 - do { 195 - prepare_to_wait(wq_head, &wbq_entry->wq_entry, mode); 196 - val = wbq_entry->key.flags; 197 - if (atomic_read(val) == 0) 198 - break; 199 - ret = (*action)(val, mode); 200 - } while (!ret && atomic_read(val) != 0); 201 - finish_wait(wq_head, &wbq_entry->wq_entry); 202 - 203 - return ret; 175 + *wbq_entry = (struct wait_bit_queue_entry){ 176 + .key = { 177 + .flags = (var), 178 + .bit_nr = -1, 179 + }, 180 + .wq_entry = { 181 + .private = current, 182 + .func = var_wake_function, 183 + .entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry), 184 + }, 185 + }; 204 186 } 187 + EXPORT_SYMBOL(init_wait_var_entry); 205 188 206 - #define DEFINE_WAIT_ATOMIC_T(name, p) \ 207 - struct wait_bit_queue_entry name = { \ 208 - .key = __WAIT_ATOMIC_T_KEY_INITIALIZER(p), \ 209 - .wq_entry = { \ 210 - .private = current, \ 211 - .func = wake_atomic_t_function, \ 212 - .entry = \ 213 - LIST_HEAD_INIT((name).wq_entry.entry), \ 214 - }, \ 215 - } 216 - 217 - __sched int out_of_line_wait_on_atomic_t(atomic_t *p, 218 - wait_atomic_t_action_f action, 219 - unsigned int mode) 189 + void wake_up_var(void *var) 220 190 { 221 - struct wait_queue_head *wq_head = atomic_t_waitqueue(p); 222 - DEFINE_WAIT_ATOMIC_T(wq_entry, p); 223 - 224 - return __wait_on_atomic_t(wq_head, &wq_entry, action, mode); 191 + __wake_up_bit(__var_waitqueue(var), var, -1); 225 192 } 226 - EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); 227 - 228 - __sched int atomic_t_wait(atomic_t *counter, unsigned int mode) 229 - { 230 - schedule(); 231 - if (signal_pending_state(mode, current)) 232 - return -EINTR; 233 - 234 - return 0; 235 - } 236 - EXPORT_SYMBOL(atomic_t_wait); 237 - 238 - 
/** 239 - * wake_up_atomic_t - Wake up a waiter on a atomic_t 240 - * @p: The atomic_t being waited on, a kernel virtual address 241 - * 242 - * Wake up anyone waiting for the atomic_t to go to zero. 243 - * 244 - * Abuse the bit-waker function and its waitqueue hash table set (the atomic_t 245 - * check is done by the waiter's wake function, not the by the waker itself). 246 - */ 247 - void wake_up_atomic_t(atomic_t *p) 248 - { 249 - __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR); 250 - } 251 - EXPORT_SYMBOL(wake_up_atomic_t); 193 + EXPORT_SYMBOL(wake_up_var); 252 194 253 195 __sched int bit_wait(struct wait_bit_key *word, int mode) 254 196 {