--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -564,6 +564,14 @@
 	continue_at_nobarrier(cl, journal_write, system_wq);
 }
 
+static void journal_write_unlock(struct closure *cl)
+{
+	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
+
+	c->journal.io_in_flight = 0;
+	spin_unlock(&c->journal.lock);
+}
+
 static void journal_write_unlocked(struct closure *cl)
 	__releases(c->journal.lock)
 {
@@ -586,15 +578,7 @@
 	bio_list_init(&list);
 
 	if (!w->need_write) {
-		/*
-		 * XXX: have to unlock closure before we unlock journal lock,
-		 * else we race with bch_journal(). But this way we race
-		 * against cache set unregister. Doh.
-		 */
-		set_closure_fn(cl, NULL, NULL);
-		closure_sub(cl, CLOSURE_RUNNING + 1);
-		spin_unlock(&c->journal.lock);
-		return;
+		closure_return_with_destructor(cl, journal_write_unlock);
 	} else if (journal_full(&c->journal)) {
 		journal_reclaim(c);
 		spin_unlock(&c->journal.lock);
@@ -662,10 +662,12 @@
 
 	w->need_write = true;
 
-	if (closure_trylock(cl, &c->cl))
-		journal_write_unlocked(cl);
-	else
+	if (!c->journal.io_in_flight) {
+		c->journal.io_in_flight = 1;
+		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
+	} else {
 		spin_unlock(&c->journal.lock);
+	}
 }
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
@@ -795,7 +793,6 @@
 {
 	struct journal *j = &c->journal;
 
-	closure_init_unlocked(&j->io);
 	spin_lock_init(&j->lock);
 	INIT_DELAYED_WORK(&j->work, journal_write_work);
 
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -104,6 +104,7 @@
 	/* used when waiting because the journal was full */
 	struct closure_waitlist	wait;
 	struct closure		io;
+	int			io_in_flight;
 	struct delayed_work	work;
 
 	/* Number of blocks free in the bucket(s) we're currently writing to */