Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: Data move path now uses bch2_trans_unlock_long()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

5 files changed: +23 -9 total
+11 -1
fs/bcachefs/btree_iter.c
···
 	return p;
 }
 
+static inline void check_srcu_held_too_long(struct btree_trans *trans)
+{
+	WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
+	     "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
+	     (jiffies - trans->srcu_lock_time) / HZ);
+}
+
 void bch2_trans_srcu_unlock(struct btree_trans *trans)
 {
 	if (trans->srcu_held) {
···
 			if (path->cached && !btree_node_locked(path, 0))
 				path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
 
+		check_srcu_held_too_long(trans);
 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
 		trans->srcu_held = false;
 	}
···
 
 	check_btree_paths_leaked(trans);
 
-	if (trans->srcu_held)
+	if (trans->srcu_held) {
+		check_srcu_held_too_long(trans);
 		srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+	}
 
 	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
 
+8 -5
fs/bcachefs/move.c
···
 {
 	struct moving_io *io;
 
-	bch2_trans_unlock(ctxt->trans);
-
 	while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+		bch2_trans_unlock_long(ctxt->trans);
 		list_del(&io->read_list);
 		move_write(io);
 	}
···
 	struct bch_fs *c = ctxt->trans->c;
 	u64 delay;
 
-	if (ctxt->wait_on_copygc) {
-		bch2_trans_unlock(ctxt->trans);
+	if (ctxt->wait_on_copygc && !c->copygc_running) {
+		bch2_trans_unlock_long(ctxt->trans);
 		wait_event_killable(c->copygc_running_wq,
 				    !c->copygc_running ||
 				    kthread_should_stop());
···
 	do {
 		delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
+
 		if (delay) {
-			bch2_trans_unlock(ctxt->trans);
+			if (delay > HZ / 10)
+				bch2_trans_unlock_long(ctxt->trans);
+			else
+				bch2_trans_unlock(ctxt->trans);
 			set_current_state(TASK_INTERRUPTIBLE);
 		}
 
+1
fs/bcachefs/move.h
···
 									\
 		if (_cond)						\
 			break;						\
+		bch2_trans_unlock_long((_ctxt)->trans);			\
 		__wait_event((_ctxt)->wait,				\
 			     bch2_moving_ctxt_next_pending_write(_ctxt) ||\
 			     (cond_finished = (_cond)));		\
+2 -2
fs/bcachefs/movinggc.c
···
 		kfree(i);
 	}
 
-	bch2_trans_unlock(ctxt->trans);
+	bch2_trans_unlock_long(ctxt->trans);
 }
 
 static bool bucket_in_flight(struct buckets_in_flight *list,
···
 	while (!ret && !kthread_should_stop()) {
 		bool did_work = false;
 
-		bch2_trans_unlock(ctxt.trans);
+		bch2_trans_unlock_long(ctxt.trans);
 		cond_resched();
 
 		if (!c->copy_gc_enabled) {
+1 -1
fs/bcachefs/rebalance.c
···
 	       !kthread_should_stop() &&
 	       !atomic64_read(&r->work_stats.sectors_seen) &&
 	       !atomic64_read(&r->scan_stats.sectors_seen)) {
-		bch2_trans_unlock(trans);
+		bch2_trans_unlock_long(trans);
 		rebalance_wait(c);
 	}
 