Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

btrfs: replace waitqueue_active with cond_wake_up

Use the wrappers and reduce the amount of low-level details about the
waitqueue management.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>

+40 -91
+1 -6
fs/btrfs/compression.c
··· 990 990 btrfs_compress_op[idx]->free_workspace(workspace); 991 991 atomic_dec(total_ws); 992 992 wake: 993 - /* 994 - * Make sure counter is updated before we wake up waiters. 995 - */ 996 - smp_mb(); 997 - if (waitqueue_active(ws_wait)) 998 - wake_up(ws_wait); 993 + cond_wake_up(ws_wait); 999 994 } 1000 995 1001 996 static void free_workspace(int type, struct list_head *ws)
+3 -6
fs/btrfs/delayed-inode.c
··· 460 460 { 461 461 int seq = atomic_inc_return(&delayed_root->items_seq); 462 462 463 - /* 464 - * atomic_dec_return implies a barrier for waitqueue_active 465 - */ 463 + /* atomic_dec_return implies a barrier */ 466 464 if ((atomic_dec_return(&delayed_root->items) < 467 - BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) && 468 - waitqueue_active(&delayed_root->wait)) 469 - wake_up(&delayed_root->wait); 465 + BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0)) 466 + cond_wake_up_nomb(&delayed_root->wait); 470 467 } 471 468 472 469 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+4 -6
fs/btrfs/dev-replace.c
··· 1009 1009 ASSERT(atomic_read(&dev_replace->read_locks) > 0); 1010 1010 ASSERT(atomic_read(&dev_replace->blocking_readers) > 0); 1011 1011 read_lock(&dev_replace->lock); 1012 - if (atomic_dec_and_test(&dev_replace->blocking_readers) && 1013 - waitqueue_active(&dev_replace->read_lock_wq)) 1014 - wake_up(&dev_replace->read_lock_wq); 1012 + /* Barrier implied by atomic_dec_and_test */ 1013 + if (atomic_dec_and_test(&dev_replace->blocking_readers)) 1014 + cond_wake_up_nomb(&dev_replace->read_lock_wq); 1015 1015 } 1016 1016 1017 1017 void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info) ··· 1022 1022 void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) 1023 1023 { 1024 1024 percpu_counter_sub(&fs_info->bio_counter, amount); 1025 - 1026 - if (waitqueue_active(&fs_info->replace_wait)) 1027 - wake_up(&fs_info->replace_wait); 1025 + cond_wake_up_nomb(&fs_info->replace_wait); 1028 1026 } 1029 1027 1030 1028 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
+1 -6
fs/btrfs/extent-tree.c
··· 11081 11081 void btrfs_end_write_no_snapshotting(struct btrfs_root *root) 11082 11082 { 11083 11083 percpu_counter_dec(&root->subv_writers->counter); 11084 - /* 11085 - * Make sure counter is updated before we wake up waiters. 11086 - */ 11087 - smp_mb(); 11088 - if (waitqueue_active(&root->subv_writers->wait)) 11089 - wake_up(&root->subv_writers->wait); 11084 + cond_wake_up(&root->subv_writers->wait); 11090 11085 } 11091 11086 11092 11087 int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
+3 -6
fs/btrfs/inode.c
··· 1156 1156 nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >> 1157 1157 PAGE_SHIFT; 1158 1158 1159 - /* 1160 - * atomic_sub_return implies a barrier for waitqueue_active 1161 - */ 1159 + /* atomic_sub_return implies a barrier */ 1162 1160 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < 1163 - 5 * SZ_1M && 1164 - waitqueue_active(&fs_info->async_submit_wait)) 1165 - wake_up(&fs_info->async_submit_wait); 1161 + 5 * SZ_1M) 1162 + cond_wake_up_nomb(&fs_info->async_submit_wait); 1166 1163 1167 1164 if (async_cow->inode) 1168 1165 submit_compressed_extents(async_cow->inode, async_cow);
+11 -23
fs/btrfs/locking.c
··· 66 66 write_lock(&eb->lock); 67 67 WARN_ON(atomic_read(&eb->spinning_writers)); 68 68 atomic_inc(&eb->spinning_writers); 69 - /* 70 - * atomic_dec_and_test implies a barrier for waitqueue_active 71 - */ 72 - if (atomic_dec_and_test(&eb->blocking_writers) && 73 - waitqueue_active(&eb->write_lock_wq)) 74 - wake_up(&eb->write_lock_wq); 69 + /* atomic_dec_and_test implies a barrier */ 70 + if (atomic_dec_and_test(&eb->blocking_writers)) 71 + cond_wake_up_nomb(&eb->write_lock_wq); 75 72 } else if (rw == BTRFS_READ_LOCK_BLOCKING) { 76 73 BUG_ON(atomic_read(&eb->blocking_readers) == 0); 77 74 read_lock(&eb->lock); 78 75 atomic_inc(&eb->spinning_readers); 79 - /* 80 - * atomic_dec_and_test implies a barrier for waitqueue_active 81 - */ 82 - if (atomic_dec_and_test(&eb->blocking_readers) && 83 - waitqueue_active(&eb->read_lock_wq)) 84 - wake_up(&eb->read_lock_wq); 76 + /* atomic_dec_and_test implies a barrier */ 77 + if (atomic_dec_and_test(&eb->blocking_readers)) 78 + cond_wake_up_nomb(&eb->read_lock_wq); 85 79 } 86 80 } 87 81 ··· 215 221 } 216 222 btrfs_assert_tree_read_locked(eb); 217 223 WARN_ON(atomic_read(&eb->blocking_readers) == 0); 218 - /* 219 - * atomic_dec_and_test implies a barrier for waitqueue_active 220 - */ 221 - if (atomic_dec_and_test(&eb->blocking_readers) && 222 - waitqueue_active(&eb->read_lock_wq)) 223 - wake_up(&eb->read_lock_wq); 224 + /* atomic_dec_and_test implies a barrier */ 225 + if (atomic_dec_and_test(&eb->blocking_readers)) 226 + cond_wake_up_nomb(&eb->read_lock_wq); 224 227 atomic_dec(&eb->read_locks); 225 228 } 226 229 ··· 266 275 if (blockers) { 267 276 WARN_ON(atomic_read(&eb->spinning_writers)); 268 277 atomic_dec(&eb->blocking_writers); 269 - /* 270 - * Make sure counter is updated before we wake up waiters. 
271 - */ 278 + /* Use the lighter barrier after atomic */ 272 279 smp_mb__after_atomic(); 273 - if (waitqueue_active(&eb->write_lock_wq)) 274 - wake_up(&eb->write_lock_wq); 280 + cond_wake_up_nomb(&eb->write_lock_wq); 275 281 } else { 276 282 WARN_ON(atomic_read(&eb->spinning_writers) != 1); 277 283 atomic_dec(&eb->spinning_writers);
+4 -10
fs/btrfs/ordered-data.c
··· 343 343 344 344 if (entry->bytes_left == 0) { 345 345 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); 346 - /* 347 - * Implicit memory barrier after test_and_set_bit 348 - */ 349 - if (waitqueue_active(&entry->wait)) 350 - wake_up(&entry->wait); 346 + /* test_and_set_bit implies a barrier */ 347 + cond_wake_up_nomb(&entry->wait); 351 348 } else { 352 349 ret = 1; 353 350 } ··· 407 410 408 411 if (entry->bytes_left == 0) { 409 412 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); 410 - /* 411 - * Implicit memory barrier after test_and_set_bit 412 - */ 413 - if (waitqueue_active(&entry->wait)) 414 - wake_up(&entry->wait); 413 + /* test_and_set_bit implies a barrier */ 414 + cond_wake_up_nomb(&entry->wait); 415 415 } else { 416 416 ret = 1; 417 417 }
+1 -6
fs/btrfs/transaction.c
··· 877 877 atomic_dec(&cur_trans->num_writers); 878 878 extwriter_counter_dec(cur_trans, trans->type); 879 879 880 - /* 881 - * Make sure counter is updated before we wake up waiters. 882 - */ 883 - smp_mb(); 884 - if (waitqueue_active(&cur_trans->writer_wait)) 885 - wake_up(&cur_trans->writer_wait); 880 + cond_wake_up(&cur_trans->writer_wait); 886 881 btrfs_put_transaction(cur_trans); 887 882 888 883 if (current->journal_info == trans)
+12 -22
fs/btrfs/tree-log.c
··· 222 222 void btrfs_end_log_trans(struct btrfs_root *root) 223 223 { 224 224 if (atomic_dec_and_test(&root->log_writers)) { 225 - /* 226 - * Implicit memory barrier after atomic_dec_and_test 227 - */ 228 - if (waitqueue_active(&root->log_writer_wait)) 229 - wake_up(&root->log_writer_wait); 225 + /* atomic_dec_and_test implies a barrier */ 226 + cond_wake_up_nomb(&root->log_writer_wait); 230 227 } 231 228 } 232 229 ··· 2985 2988 2986 2989 mutex_lock(&log_root_tree->log_mutex); 2987 2990 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 2988 - /* 2989 - * Implicit memory barrier after atomic_dec_and_test 2990 - */ 2991 - if (waitqueue_active(&log_root_tree->log_writer_wait)) 2992 - wake_up(&log_root_tree->log_writer_wait); 2991 + /* atomic_dec_and_test implies a barrier */ 2992 + cond_wake_up_nomb(&log_root_tree->log_writer_wait); 2993 2993 } 2994 2994 2995 2995 if (ret) { ··· 3110 3116 mutex_unlock(&log_root_tree->log_mutex); 3111 3117 3112 3118 /* 3113 - * The barrier before waitqueue_active is needed so all the updates 3114 - * above are seen by the woken threads. It might not be necessary, but 3115 - * proving that seems to be hard. 3119 + * The barrier before waitqueue_active (in cond_wake_up) is needed so 3120 + * all the updates above are seen by the woken threads. It might not be 3121 + * necessary, but proving that seems to be hard. 3116 3122 */ 3117 - smp_mb(); 3118 - if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 3119 - wake_up(&log_root_tree->log_commit_wait[index2]); 3123 + cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3120 3124 out: 3121 3125 mutex_lock(&root->log_mutex); 3122 3126 btrfs_remove_all_log_ctxs(root, index1, ret); ··· 3123 3131 mutex_unlock(&root->log_mutex); 3124 3132 3125 3133 /* 3126 - * The barrier before waitqueue_active is needed so all the updates 3127 - * above are seen by the woken threads. It might not be necessary, but 3128 - * proving that seems to be hard. 
3134 + * The barrier before waitqueue_active (in cond_wake_up) is needed so 3135 + * all the updates above are seen by the woken threads. It might not be 3136 + * necessary, but proving that seems to be hard. 3129 3137 */ 3130 - smp_mb(); 3131 - if (waitqueue_active(&root->log_commit_wait[index1])) 3132 - wake_up(&root->log_commit_wait[index1]); 3138 + cond_wake_up(&root->log_commit_wait[index1]); 3133 3139 return ret; 3134 3140 } 3135 3141