Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Pass mode to wait_on_atomic_t() action funcs and provide default actions

Make wait_on_atomic_t() pass the TASK_* mode onto its action function as an
extra argument and make it 'unsigned int' throughout.

Also, consolidate a bunch of identical action functions into a default
function that can do the appropriate thing for the mode.

Also, change the argument name in the bit_wait*() function declarations to
reflect the fact that it's the mode and not the bit number.

[Peter Z gives this a grudging ACK, but thinks that the whole atomic_t wait
should be done differently, though he's not immediately sure as to how]

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
cc: Ingo Molnar <mingo@kernel.org>

+37 -98
+1 -13
arch/mips/kernel/traps.c
··· 1233 1233 return NOTIFY_OK; 1234 1234 } 1235 1235 1236 - static int wait_on_fp_mode_switch(atomic_t *p) 1237 - { 1238 - /* 1239 - * The FP mode for this task is currently being switched. That may 1240 - * involve modifications to the format of this tasks FP context which 1241 - * make it unsafe to proceed with execution for the moment. Instead, 1242 - * schedule some other task. 1243 - */ 1244 - schedule(); 1245 - return 0; 1246 - } 1247 - 1248 1236 static int enable_restore_fp_context(int msa) 1249 1237 { 1250 1238 int err, was_fpu_owner, prior_msa; ··· 1242 1254 * complete before proceeding. 1243 1255 */ 1244 1256 wait_on_atomic_t(&current->mm->context.fp_mode_switching, 1245 - wait_on_fp_mode_switch, TASK_KILLABLE); 1257 + atomic_t_wait, TASK_KILLABLE); 1246 1258 1247 1259 if (!used_math()) { 1248 1260 /* First time FP context user. */
+1 -7
drivers/gpu/drm/drm_dp_aux_dev.c
··· 263 263 return aux_dev; 264 264 } 265 265 266 - static int auxdev_wait_atomic_t(atomic_t *p) 267 - { 268 - schedule(); 269 - return 0; 270 - } 271 - 272 266 void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux) 273 267 { 274 268 struct drm_dp_aux_dev *aux_dev; ··· 277 283 mutex_unlock(&aux_idr_mutex); 278 284 279 285 atomic_dec(&aux_dev->usecount); 280 - wait_on_atomic_t(&aux_dev->usecount, auxdev_wait_atomic_t, 286 + wait_on_atomic_t(&aux_dev->usecount, atomic_t_wait, 281 287 TASK_UNINTERRUPTIBLE); 282 288 283 289 minor = aux_dev->index;
+2 -8
drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
··· 271 271 u32 seqno; 272 272 }; 273 273 274 - static int wait_atomic(atomic_t *p) 275 - { 276 - schedule(); 277 - return 0; 278 - } 279 - 280 - static int wait_atomic_timeout(atomic_t *p) 274 + static int wait_atomic_timeout(atomic_t *p, unsigned int mode) 281 275 { 282 276 return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT; 283 277 } ··· 342 348 atomic_set(ready, 0); 343 349 wake_up_all(wq); 344 350 345 - wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE); 351 + wait_on_atomic_t(set, atomic_t_wait, TASK_UNINTERRUPTIBLE); 346 352 atomic_set(ready, count); 347 353 atomic_set(done, count); 348 354 }
+1 -7
drivers/media/platform/qcom/venus/hfi.c
··· 88 88 return ret; 89 89 } 90 90 91 - static int core_deinit_wait_atomic_t(atomic_t *p) 92 - { 93 - schedule(); 94 - return 0; 95 - } 96 - 97 91 int hfi_core_deinit(struct venus_core *core, bool blocking) 98 92 { 99 93 int ret = 0, empty; ··· 106 112 107 113 if (!empty) { 108 114 mutex_unlock(&core->lock); 109 - wait_on_atomic_t(&core->insts_count, core_deinit_wait_atomic_t, 115 + wait_on_atomic_t(&core->insts_count, atomic_t_wait, 110 116 TASK_UNINTERRUPTIBLE); 111 117 mutex_lock(&core->lock); 112 118 }
+1 -7
fs/afs/rxrpc.c
··· 41 41 42 42 static DECLARE_WORK(afs_charge_preallocation_work, afs_charge_preallocation); 43 43 44 - static int afs_wait_atomic_t(atomic_t *p) 45 - { 46 - schedule(); 47 - return 0; 48 - } 49 - 50 44 /* 51 45 * open an RxRPC socket and bind it to be a server for callback notifications 52 46 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT ··· 115 121 } 116 122 117 123 _debug("outstanding %u", atomic_read(&afs_outstanding_calls)); 118 - wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t, 124 + wait_on_atomic_t(&afs_outstanding_calls, atomic_t_wait, 119 125 TASK_UNINTERRUPTIBLE); 120 126 _debug("no outstanding calls"); 121 127
+3 -24
fs/btrfs/extent-tree.c
··· 4016 4016 btrfs_put_block_group(bg); 4017 4017 } 4018 4018 4019 - static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a) 4020 - { 4021 - schedule(); 4022 - return 0; 4023 - } 4024 - 4025 4019 void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) 4026 4020 { 4027 - wait_on_atomic_t(&bg->nocow_writers, 4028 - btrfs_wait_nocow_writers_atomic_t, 4021 + wait_on_atomic_t(&bg->nocow_writers, atomic_t_wait, 4029 4022 TASK_UNINTERRUPTIBLE); 4030 4023 } 4031 4024 ··· 6588 6595 btrfs_put_block_group(bg); 6589 6596 } 6590 6597 6591 - static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a) 6592 - { 6593 - schedule(); 6594 - return 0; 6595 - } 6596 - 6597 6598 void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) 6598 6599 { 6599 6600 struct btrfs_space_info *space_info = bg->space_info; ··· 6610 6623 down_write(&space_info->groups_sem); 6611 6624 up_write(&space_info->groups_sem); 6612 6625 6613 - wait_on_atomic_t(&bg->reservations, 6614 - btrfs_wait_bg_reservations_atomic_t, 6626 + wait_on_atomic_t(&bg->reservations, atomic_t_wait, 6615 6627 TASK_UNINTERRUPTIBLE); 6616 6628 } 6617 6629 ··· 11092 11106 return 1; 11093 11107 } 11094 11108 11095 - static int wait_snapshotting_atomic_t(atomic_t *a) 11096 - { 11097 - schedule(); 11098 - return 0; 11099 - } 11100 - 11101 11109 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root) 11102 11110 { 11103 11111 while (true) { ··· 11100 11120 ret = btrfs_start_write_no_snapshotting(root); 11101 11121 if (ret) 11102 11122 break; 11103 - wait_on_atomic_t(&root->will_be_snapshotted, 11104 - wait_snapshotting_atomic_t, 11123 + wait_on_atomic_t(&root->will_be_snapshotted, atomic_t_wait, 11105 11124 TASK_UNINTERRUPTIBLE); 11106 11125 } 11107 11126 }
+1 -1
fs/fscache/cookie.c
··· 558 558 * have completed. 559 559 */ 560 560 if (!atomic_dec_and_test(&cookie->n_active)) 561 - wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t, 561 + wait_on_atomic_t(&cookie->n_active, atomic_t_wait, 562 562 TASK_UNINTERRUPTIBLE); 563 563 564 564 /* Make sure any pending writes are cancelled. */
-2
fs/fscache/internal.h
··· 97 97 return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq); 98 98 } 99 99 100 - extern int fscache_wait_atomic_t(atomic_t *); 101 - 102 100 /* 103 101 * object.c 104 102 */
-9
fs/fscache/main.c
··· 195 195 } 196 196 197 197 module_exit(fscache_exit); 198 - 199 - /* 200 - * wait_on_atomic_t() sleep function for uninterruptible waiting 201 - */ 202 - int fscache_wait_atomic_t(atomic_t *p) 203 - { 204 - schedule(); 205 - return 0; 206 - }
+2 -2
fs/nfs/inode.c
··· 85 85 } 86 86 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable); 87 87 88 - int nfs_wait_atomic_killable(atomic_t *p) 88 + int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode) 89 89 { 90 - return nfs_wait_killable(TASK_KILLABLE); 90 + return nfs_wait_killable(mode); 91 91 } 92 92 93 93 /**
+1 -1
fs/nfs/internal.h
··· 388 388 void nfs_zap_acl_cache(struct inode *inode); 389 389 extern bool nfs_check_cache_invalid(struct inode *, unsigned long); 390 390 extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode); 391 - extern int nfs_wait_atomic_killable(atomic_t *p); 391 + extern int nfs_wait_atomic_killable(atomic_t *p, unsigned int mode); 392 392 393 393 /* super.c */ 394 394 extern const struct super_operations nfs_sops;
+1 -7
fs/ocfs2/filecheck.c
··· 129 129 ocfs2_filecheck_show, 130 130 ocfs2_filecheck_store); 131 131 132 - static int ocfs2_filecheck_sysfs_wait(atomic_t *p) 133 - { 134 - schedule(); 135 - return 0; 136 - } 137 - 138 132 static void 139 133 ocfs2_filecheck_sysfs_free(struct ocfs2_filecheck_sysfs_entry *entry) 140 134 { 141 135 struct ocfs2_filecheck_entry *p; 142 136 143 137 if (!atomic_dec_and_test(&entry->fs_count)) 144 - wait_on_atomic_t(&entry->fs_count, ocfs2_filecheck_sysfs_wait, 138 + wait_on_atomic_t(&entry->fs_count, atomic_t_wait, 145 139 TASK_UNINTERRUPTIBLE); 146 140 147 141 spin_lock(&entry->fs_fcheck->fc_lock);
+9 -6
include/linux/wait_bit.h
··· 26 26 { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, } 27 27 28 28 typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); 29 + typedef int wait_atomic_t_action_f(atomic_t *counter, unsigned int mode); 30 + 29 31 void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); 30 32 int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); 31 33 int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); ··· 36 34 int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); 37 35 int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); 38 36 int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); 39 - int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode); 37 + int out_of_line_wait_on_atomic_t(atomic_t *p, wait_atomic_t_action_f action, unsigned int mode); 40 38 struct wait_queue_head *bit_waitqueue(void *word, int bit); 41 39 extern void __init wait_bit_init(void); 42 40 ··· 53 51 }, \ 54 52 } 55 53 56 - extern int bit_wait(struct wait_bit_key *key, int bit); 57 - extern int bit_wait_io(struct wait_bit_key *key, int bit); 58 - extern int bit_wait_timeout(struct wait_bit_key *key, int bit); 59 - extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit); 54 + extern int bit_wait(struct wait_bit_key *key, int mode); 55 + extern int bit_wait_io(struct wait_bit_key *key, int mode); 56 + extern int bit_wait_timeout(struct wait_bit_key *key, int mode); 57 + extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); 58 + extern int atomic_t_wait(atomic_t *counter, unsigned int mode); 60 59 61 60 /** 62 61 * wait_on_bit - wait for a bit to be cleared ··· 254 251 * outside of the target 'word'. 
255 252 */ 256 253 static inline 257 - int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) 254 + int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode) 258 255 { 259 256 might_sleep(); 260 257 if (atomic_read(val) == 0)
+14 -4
kernel/sched/wait_bit.c
··· 183 183 */ 184 184 static __sched 185 185 int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, 186 - int (*action)(atomic_t *), unsigned mode) 186 + wait_atomic_t_action_f action, unsigned int mode) 187 187 { 188 188 atomic_t *val; 189 189 int ret = 0; ··· 193 193 val = wbq_entry->key.flags; 194 194 if (atomic_read(val) == 0) 195 195 break; 196 - ret = (*action)(val); 196 + ret = (*action)(val, mode); 197 197 } while (!ret && atomic_read(val) != 0); 198 198 finish_wait(wq_head, &wbq_entry->wq_entry); 199 199 return ret; ··· 210 210 }, \ 211 211 } 212 212 213 - __sched int out_of_line_wait_on_atomic_t(atomic_t *p, int (*action)(atomic_t *), 214 - unsigned mode) 213 + __sched int out_of_line_wait_on_atomic_t(atomic_t *p, 214 + wait_atomic_t_action_f action, 215 + unsigned int mode) 215 216 { 216 217 struct wait_queue_head *wq_head = atomic_t_waitqueue(p); 217 218 DEFINE_WAIT_ATOMIC_T(wq_entry, p); ··· 220 219 return __wait_on_atomic_t(wq_head, &wq_entry, action, mode); 221 220 } 222 221 EXPORT_SYMBOL(out_of_line_wait_on_atomic_t); 222 + 223 + __sched int atomic_t_wait(atomic_t *counter, unsigned int mode) 224 + { 225 + schedule(); 226 + if (signal_pending_state(mode, current)) 227 + return -EINTR; 228 + return 0; 229 + } 230 + EXPORT_SYMBOL(atomic_t_wait); 223 231 224 232 /** 225 233 * wake_up_atomic_t - Wake up a waiter on a atomic_t