Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

closures: CLOSURE_CALLBACK() to fix type punning

Control flow integrity is now checking that type signatures match on
indirect function calls. That breaks closures, which embed a work_struct
in a closure in such a way that a closure_fn may also be used as a
workqueue fn by the underlying closure code.

So we have to change closure fns to take a work_struct as their
argument - but that results in a loss of clarity, as closure fns have
different semantics from normal workqueue functions (they run owning a
ref on the closure, which must be released with continue_at() or
closure_return()).

Thus, this patch introduces CLOSURE_CALLBACK() and closure_type() macros
as suggested by Kees, to smooth things over a bit.

Suggested-by: Kees Cook <keescook@chromium.org>
Cc: Coly Li <colyli@suse.de>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+127 -124
+7 -7
drivers/md/bcache/btree.c
··· 293 293 w->journal = NULL; 294 294 } 295 295 296 - static void btree_node_write_unlock(struct closure *cl) 296 + static CLOSURE_CALLBACK(btree_node_write_unlock) 297 297 { 298 - struct btree *b = container_of(cl, struct btree, io); 298 + closure_type(b, struct btree, io); 299 299 300 300 up(&b->io_mutex); 301 301 } 302 302 303 - static void __btree_node_write_done(struct closure *cl) 303 + static CLOSURE_CALLBACK(__btree_node_write_done) 304 304 { 305 - struct btree *b = container_of(cl, struct btree, io); 305 + closure_type(b, struct btree, io); 306 306 struct btree_write *w = btree_prev_write(b); 307 307 308 308 bch_bbio_free(b->bio, b->c); ··· 315 315 closure_return_with_destructor(cl, btree_node_write_unlock); 316 316 } 317 317 318 - static void btree_node_write_done(struct closure *cl) 318 + static CLOSURE_CALLBACK(btree_node_write_done) 319 319 { 320 - struct btree *b = container_of(cl, struct btree, io); 320 + closure_type(b, struct btree, io); 321 321 322 322 bio_free_pages(b->bio); 323 - __btree_node_write_done(cl); 323 + __btree_node_write_done(&cl->work); 324 324 } 325 325 326 326 static void btree_node_write_endio(struct bio *bio)
+10 -10
drivers/md/bcache/journal.c
··· 723 723 closure_put(&w->c->journal.io); 724 724 } 725 725 726 - static void journal_write(struct closure *cl); 726 + static CLOSURE_CALLBACK(journal_write); 727 727 728 - static void journal_write_done(struct closure *cl) 728 + static CLOSURE_CALLBACK(journal_write_done) 729 729 { 730 - struct journal *j = container_of(cl, struct journal, io); 730 + closure_type(j, struct journal, io); 731 731 struct journal_write *w = (j->cur == j->w) 732 732 ? &j->w[1] 733 733 : &j->w[0]; ··· 736 736 continue_at_nobarrier(cl, journal_write, bch_journal_wq); 737 737 } 738 738 739 - static void journal_write_unlock(struct closure *cl) 739 + static CLOSURE_CALLBACK(journal_write_unlock) 740 740 __releases(&c->journal.lock) 741 741 { 742 - struct cache_set *c = container_of(cl, struct cache_set, journal.io); 742 + closure_type(c, struct cache_set, journal.io); 743 743 744 744 c->journal.io_in_flight = 0; 745 745 spin_unlock(&c->journal.lock); 746 746 } 747 747 748 - static void journal_write_unlocked(struct closure *cl) 748 + static CLOSURE_CALLBACK(journal_write_unlocked) 749 749 __releases(c->journal.lock) 750 750 { 751 - struct cache_set *c = container_of(cl, struct cache_set, journal.io); 751 + closure_type(c, struct cache_set, journal.io); 752 752 struct cache *ca = c->cache; 753 753 struct journal_write *w = c->journal.cur; 754 754 struct bkey *k = &c->journal.key; ··· 823 823 continue_at(cl, journal_write_done, NULL); 824 824 } 825 825 826 - static void journal_write(struct closure *cl) 826 + static CLOSURE_CALLBACK(journal_write) 827 827 { 828 - struct cache_set *c = container_of(cl, struct cache_set, journal.io); 828 + closure_type(c, struct cache_set, journal.io); 829 829 830 830 spin_lock(&c->journal.lock); 831 - journal_write_unlocked(cl); 831 + journal_write_unlocked(&cl->work); 832 832 } 833 833 834 834 static void journal_try_write(struct cache_set *c)
+8 -8
drivers/md/bcache/movinggc.c
··· 35 35 36 36 /* Moving GC - IO loop */ 37 37 38 - static void moving_io_destructor(struct closure *cl) 38 + static CLOSURE_CALLBACK(moving_io_destructor) 39 39 { 40 - struct moving_io *io = container_of(cl, struct moving_io, cl); 40 + closure_type(io, struct moving_io, cl); 41 41 42 42 kfree(io); 43 43 } 44 44 45 - static void write_moving_finish(struct closure *cl) 45 + static CLOSURE_CALLBACK(write_moving_finish) 46 46 { 47 - struct moving_io *io = container_of(cl, struct moving_io, cl); 47 + closure_type(io, struct moving_io, cl); 48 48 struct bio *bio = &io->bio.bio; 49 49 50 50 bio_free_pages(bio); ··· 89 89 bch_bio_map(bio, NULL); 90 90 } 91 91 92 - static void write_moving(struct closure *cl) 92 + static CLOSURE_CALLBACK(write_moving) 93 93 { 94 - struct moving_io *io = container_of(cl, struct moving_io, cl); 94 + closure_type(io, struct moving_io, cl); 95 95 struct data_insert_op *op = &io->op; 96 96 97 97 if (!op->status) { ··· 113 113 continue_at(cl, write_moving_finish, op->wq); 114 114 } 115 115 116 - static void read_moving_submit(struct closure *cl) 116 + static CLOSURE_CALLBACK(read_moving_submit) 117 117 { 118 - struct moving_io *io = container_of(cl, struct moving_io, cl); 118 + closure_type(io, struct moving_io, cl); 119 119 struct bio *bio = &io->bio.bio; 120 120 121 121 bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
+37 -37
drivers/md/bcache/request.c
··· 25 25 26 26 struct kmem_cache *bch_search_cache; 27 27 28 - static void bch_data_insert_start(struct closure *cl); 28 + static CLOSURE_CALLBACK(bch_data_insert_start); 29 29 30 30 static unsigned int cache_mode(struct cached_dev *dc) 31 31 { ··· 55 55 56 56 /* Insert data into cache */ 57 57 58 - static void bch_data_insert_keys(struct closure *cl) 58 + static CLOSURE_CALLBACK(bch_data_insert_keys) 59 59 { 60 - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 60 + closure_type(op, struct data_insert_op, cl); 61 61 atomic_t *journal_ref = NULL; 62 62 struct bkey *replace_key = op->replace ? &op->replace_key : NULL; 63 63 int ret; ··· 136 136 continue_at(cl, bch_data_insert_keys, op->wq); 137 137 } 138 138 139 - static void bch_data_insert_error(struct closure *cl) 139 + static CLOSURE_CALLBACK(bch_data_insert_error) 140 140 { 141 - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 141 + closure_type(op, struct data_insert_op, cl); 142 142 143 143 /* 144 144 * Our data write just errored, which means we've got a bunch of keys to ··· 163 163 164 164 op->insert_keys.top = dst; 165 165 166 - bch_data_insert_keys(cl); 166 + bch_data_insert_keys(&cl->work); 167 167 } 168 168 169 169 static void bch_data_insert_endio(struct bio *bio) ··· 184 184 bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache"); 185 185 } 186 186 187 - static void bch_data_insert_start(struct closure *cl) 187 + static CLOSURE_CALLBACK(bch_data_insert_start) 188 188 { 189 - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 189 + closure_type(op, struct data_insert_op, cl); 190 190 struct bio *bio = op->bio, *n; 191 191 192 192 if (op->bypass) ··· 305 305 * If op->bypass is true, instead of inserting the data it invalidates the 306 306 * region of the cache represented by op->bio and op->inode. 
307 307 */ 308 - void bch_data_insert(struct closure *cl) 308 + CLOSURE_CALLBACK(bch_data_insert) 309 309 { 310 - struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); 310 + closure_type(op, struct data_insert_op, cl); 311 311 312 312 trace_bcache_write(op->c, op->inode, op->bio, 313 313 op->writeback, op->bypass); 314 314 315 315 bch_keylist_init(&op->insert_keys); 316 316 bio_get(op->bio); 317 - bch_data_insert_start(cl); 317 + bch_data_insert_start(&cl->work); 318 318 } 319 319 320 320 /* ··· 575 575 return n == bio ? MAP_DONE : MAP_CONTINUE; 576 576 } 577 577 578 - static void cache_lookup(struct closure *cl) 578 + static CLOSURE_CALLBACK(cache_lookup) 579 579 { 580 - struct search *s = container_of(cl, struct search, iop.cl); 580 + closure_type(s, struct search, iop.cl); 581 581 struct bio *bio = &s->bio.bio; 582 582 struct cached_dev *dc; 583 583 int ret; ··· 698 698 bio_cnt_set(bio, 3); 699 699 } 700 700 701 - static void search_free(struct closure *cl) 701 + static CLOSURE_CALLBACK(search_free) 702 702 { 703 - struct search *s = container_of(cl, struct search, cl); 703 + closure_type(s, struct search, cl); 704 704 705 705 atomic_dec(&s->iop.c->search_inflight); 706 706 ··· 749 749 750 750 /* Cached devices */ 751 751 752 - static void cached_dev_bio_complete(struct closure *cl) 752 + static CLOSURE_CALLBACK(cached_dev_bio_complete) 753 753 { 754 - struct search *s = container_of(cl, struct search, cl); 754 + closure_type(s, struct search, cl); 755 755 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); 756 756 757 757 cached_dev_put(dc); 758 - search_free(cl); 758 + search_free(&cl->work); 759 759 } 760 760 761 761 /* Process reads */ 762 762 763 - static void cached_dev_read_error_done(struct closure *cl) 763 + static CLOSURE_CALLBACK(cached_dev_read_error_done) 764 764 { 765 - struct search *s = container_of(cl, struct search, cl); 765 + closure_type(s, struct search, cl); 766 766 767 767 if (s->iop.replace_collision) 
768 768 bch_mark_cache_miss_collision(s->iop.c, s->d); ··· 770 770 if (s->iop.bio) 771 771 bio_free_pages(s->iop.bio); 772 772 773 - cached_dev_bio_complete(cl); 773 + cached_dev_bio_complete(&cl->work); 774 774 } 775 775 776 - static void cached_dev_read_error(struct closure *cl) 776 + static CLOSURE_CALLBACK(cached_dev_read_error) 777 777 { 778 - struct search *s = container_of(cl, struct search, cl); 778 + closure_type(s, struct search, cl); 779 779 struct bio *bio = &s->bio.bio; 780 780 781 781 /* ··· 801 801 continue_at(cl, cached_dev_read_error_done, NULL); 802 802 } 803 803 804 - static void cached_dev_cache_miss_done(struct closure *cl) 804 + static CLOSURE_CALLBACK(cached_dev_cache_miss_done) 805 805 { 806 - struct search *s = container_of(cl, struct search, cl); 806 + closure_type(s, struct search, cl); 807 807 struct bcache_device *d = s->d; 808 808 809 809 if (s->iop.replace_collision) ··· 812 812 if (s->iop.bio) 813 813 bio_free_pages(s->iop.bio); 814 814 815 - cached_dev_bio_complete(cl); 815 + cached_dev_bio_complete(&cl->work); 816 816 closure_put(&d->cl); 817 817 } 818 818 819 - static void cached_dev_read_done(struct closure *cl) 819 + static CLOSURE_CALLBACK(cached_dev_read_done) 820 820 { 821 - struct search *s = container_of(cl, struct search, cl); 821 + closure_type(s, struct search, cl); 822 822 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); 823 823 824 824 /* ··· 858 858 continue_at(cl, cached_dev_cache_miss_done, NULL); 859 859 } 860 860 861 - static void cached_dev_read_done_bh(struct closure *cl) 861 + static CLOSURE_CALLBACK(cached_dev_read_done_bh) 862 862 { 863 - struct search *s = container_of(cl, struct search, cl); 863 + closure_type(s, struct search, cl); 864 864 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); 865 865 866 866 bch_mark_cache_accounting(s->iop.c, s->d, ··· 955 955 956 956 /* Process writes */ 957 957 958 - static void cached_dev_write_complete(struct closure *cl) 958 + 
static CLOSURE_CALLBACK(cached_dev_write_complete) 959 959 { 960 - struct search *s = container_of(cl, struct search, cl); 960 + closure_type(s, struct search, cl); 961 961 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk); 962 962 963 963 up_read_non_owner(&dc->writeback_lock); 964 - cached_dev_bio_complete(cl); 964 + cached_dev_bio_complete(&cl->work); 965 965 } 966 966 967 967 static void cached_dev_write(struct cached_dev *dc, struct search *s) ··· 1048 1048 continue_at(cl, cached_dev_write_complete, NULL); 1049 1049 } 1050 1050 1051 - static void cached_dev_nodata(struct closure *cl) 1051 + static CLOSURE_CALLBACK(cached_dev_nodata) 1052 1052 { 1053 - struct search *s = container_of(cl, struct search, cl); 1053 + closure_type(s, struct search, cl); 1054 1054 struct bio *bio = &s->bio.bio; 1055 1055 1056 1056 if (s->iop.flush_journal) ··· 1265 1265 return MAP_CONTINUE; 1266 1266 } 1267 1267 1268 - static void flash_dev_nodata(struct closure *cl) 1268 + static CLOSURE_CALLBACK(flash_dev_nodata) 1269 1269 { 1270 - struct search *s = container_of(cl, struct search, cl); 1270 + closure_type(s, struct search, cl); 1271 1271 1272 1272 if (s->iop.flush_journal) 1273 1273 bch_journal_meta(s->iop.c, cl);
+1 -1
drivers/md/bcache/request.h
··· 34 34 }; 35 35 36 36 unsigned int bch_get_congested(const struct cache_set *c); 37 - void bch_data_insert(struct closure *cl); 37 + CLOSURE_CALLBACK(bch_data_insert); 38 38 39 39 void bch_cached_dev_request_init(struct cached_dev *dc); 40 40 void cached_dev_submit_bio(struct bio *bio);
+20 -20
drivers/md/bcache/super.c
··· 327 327 submit_bio(bio); 328 328 } 329 329 330 - static void bch_write_bdev_super_unlock(struct closure *cl) 330 + static CLOSURE_CALLBACK(bch_write_bdev_super_unlock) 331 331 { 332 - struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write); 332 + closure_type(dc, struct cached_dev, sb_write); 333 333 334 334 up(&dc->sb_write_mutex); 335 335 } ··· 363 363 closure_put(&ca->set->sb_write); 364 364 } 365 365 366 - static void bcache_write_super_unlock(struct closure *cl) 366 + static CLOSURE_CALLBACK(bcache_write_super_unlock) 367 367 { 368 - struct cache_set *c = container_of(cl, struct cache_set, sb_write); 368 + closure_type(c, struct cache_set, sb_write); 369 369 370 370 up(&c->sb_write_mutex); 371 371 } ··· 407 407 closure_put(cl); 408 408 } 409 409 410 - static void uuid_io_unlock(struct closure *cl) 410 + static CLOSURE_CALLBACK(uuid_io_unlock) 411 411 { 412 - struct cache_set *c = container_of(cl, struct cache_set, uuid_write); 412 + closure_type(c, struct cache_set, uuid_write); 413 413 414 414 up(&c->uuid_write_mutex); 415 415 } ··· 1342 1342 module_put(THIS_MODULE); 1343 1343 } 1344 1344 1345 - static void cached_dev_free(struct closure *cl) 1345 + static CLOSURE_CALLBACK(cached_dev_free) 1346 1346 { 1347 - struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1347 + closure_type(dc, struct cached_dev, disk.cl); 1348 1348 1349 1349 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) 1350 1350 cancel_writeback_rate_update_dwork(dc); ··· 1376 1376 kobject_put(&dc->disk.kobj); 1377 1377 } 1378 1378 1379 - static void cached_dev_flush(struct closure *cl) 1379 + static CLOSURE_CALLBACK(cached_dev_flush) 1380 1380 { 1381 - struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); 1381 + closure_type(dc, struct cached_dev, disk.cl); 1382 1382 struct bcache_device *d = &dc->disk; 1383 1383 1384 1384 mutex_lock(&bch_register_lock); ··· 1497 1497 kfree(d); 1498 1498 } 1499 1499 1500 - static void 
flash_dev_free(struct closure *cl) 1500 + static CLOSURE_CALLBACK(flash_dev_free) 1501 1501 { 1502 - struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1502 + closure_type(d, struct bcache_device, cl); 1503 1503 1504 1504 mutex_lock(&bch_register_lock); 1505 1505 atomic_long_sub(bcache_dev_sectors_dirty(d), ··· 1510 1510 kobject_put(&d->kobj); 1511 1511 } 1512 1512 1513 - static void flash_dev_flush(struct closure *cl) 1513 + static CLOSURE_CALLBACK(flash_dev_flush) 1514 1514 { 1515 - struct bcache_device *d = container_of(cl, struct bcache_device, cl); 1515 + closure_type(d, struct bcache_device, cl); 1516 1516 1517 1517 mutex_lock(&bch_register_lock); 1518 1518 bcache_device_unlink(d); ··· 1668 1668 module_put(THIS_MODULE); 1669 1669 } 1670 1670 1671 - static void cache_set_free(struct closure *cl) 1671 + static CLOSURE_CALLBACK(cache_set_free) 1672 1672 { 1673 - struct cache_set *c = container_of(cl, struct cache_set, cl); 1673 + closure_type(c, struct cache_set, cl); 1674 1674 struct cache *ca; 1675 1675 1676 1676 debugfs_remove(c->debug); ··· 1709 1709 kobject_put(&c->kobj); 1710 1710 } 1711 1711 1712 - static void cache_set_flush(struct closure *cl) 1712 + static CLOSURE_CALLBACK(cache_set_flush) 1713 1713 { 1714 - struct cache_set *c = container_of(cl, struct cache_set, caching); 1714 + closure_type(c, struct cache_set, caching); 1715 1715 struct cache *ca = c->cache; 1716 1716 struct btree *b; 1717 1717 ··· 1806 1806 } 1807 1807 } 1808 1808 1809 - static void __cache_set_unregister(struct closure *cl) 1809 + static CLOSURE_CALLBACK(__cache_set_unregister) 1810 1810 { 1811 - struct cache_set *c = container_of(cl, struct cache_set, caching); 1811 + closure_type(c, struct cache_set, caching); 1812 1812 struct cached_dev *dc; 1813 1813 struct bcache_device *d; 1814 1814 size_t i;
+8 -8
drivers/md/bcache/writeback.c
··· 341 341 bch_bio_map(bio, NULL); 342 342 } 343 343 344 - static void dirty_io_destructor(struct closure *cl) 344 + static CLOSURE_CALLBACK(dirty_io_destructor) 345 345 { 346 - struct dirty_io *io = container_of(cl, struct dirty_io, cl); 346 + closure_type(io, struct dirty_io, cl); 347 347 348 348 kfree(io); 349 349 } 350 350 351 - static void write_dirty_finish(struct closure *cl) 351 + static CLOSURE_CALLBACK(write_dirty_finish) 352 352 { 353 - struct dirty_io *io = container_of(cl, struct dirty_io, cl); 353 + closure_type(io, struct dirty_io, cl); 354 354 struct keybuf_key *w = io->bio.bi_private; 355 355 struct cached_dev *dc = io->dc; 356 356 ··· 400 400 closure_put(&io->cl); 401 401 } 402 402 403 - static void write_dirty(struct closure *cl) 403 + static CLOSURE_CALLBACK(write_dirty) 404 404 { 405 - struct dirty_io *io = container_of(cl, struct dirty_io, cl); 405 + closure_type(io, struct dirty_io, cl); 406 406 struct keybuf_key *w = io->bio.bi_private; 407 407 struct cached_dev *dc = io->dc; 408 408 ··· 462 462 dirty_endio(bio); 463 463 } 464 464 465 - static void read_dirty_submit(struct closure *cl) 465 + static CLOSURE_CALLBACK(read_dirty_submit) 466 466 { 467 - struct dirty_io *io = container_of(cl, struct dirty_io, cl); 467 + closure_type(io, struct dirty_io, cl); 468 468 469 469 closure_bio_submit(io->dc->disk.c, &io->bio, cl); 470 470
+3 -4
fs/bcachefs/btree_io.c
··· 1358 1358 return offset; 1359 1359 } 1360 1360 1361 - static void btree_node_read_all_replicas_done(struct closure *cl) 1361 + static CLOSURE_CALLBACK(btree_node_read_all_replicas_done) 1362 1362 { 1363 - struct btree_node_read_all *ra = 1364 - container_of(cl, struct btree_node_read_all, cl); 1363 + closure_type(ra, struct btree_node_read_all, cl); 1365 1364 struct bch_fs *c = ra->c; 1366 1365 struct btree *b = ra->b; 1367 1366 struct printbuf buf = PRINTBUF; ··· 1566 1567 1567 1568 if (sync) { 1568 1569 closure_sync(&ra->cl); 1569 - btree_node_read_all_replicas_done(&ra->cl); 1570 + btree_node_read_all_replicas_done(&ra->cl.work); 1570 1571 } else { 1571 1572 continue_at(&ra->cl, btree_node_read_all_replicas_done, 1572 1573 c->io_complete_wq);
+2 -2
fs/bcachefs/btree_update_interior.c
··· 778 778 } 779 779 } 780 780 781 - static void btree_update_set_nodes_written(struct closure *cl) 781 + static CLOSURE_CALLBACK(btree_update_set_nodes_written) 782 782 { 783 - struct btree_update *as = container_of(cl, struct btree_update, cl); 783 + closure_type(as, struct btree_update, cl); 784 784 struct bch_fs *c = as->c; 785 785 786 786 mutex_lock(&c->btree_interior_update_lock);
+4 -4
fs/bcachefs/fs-io-direct.c
··· 35 35 } 36 36 } 37 37 38 - static void bch2_dio_read_complete(struct closure *cl) 38 + static CLOSURE_CALLBACK(bch2_dio_read_complete) 39 39 { 40 - struct dio_read *dio = container_of(cl, struct dio_read, cl); 40 + closure_type(dio, struct dio_read, cl); 41 41 42 42 dio->req->ki_complete(dio->req, dio->ret); 43 43 bio_check_or_release(&dio->rbio.bio, dio->should_dirty); ··· 325 325 return 0; 326 326 } 327 327 328 - static void bch2_dio_write_flush_done(struct closure *cl) 328 + static CLOSURE_CALLBACK(bch2_dio_write_flush_done) 329 329 { 330 - struct dio_write *dio = container_of(cl, struct dio_write, op.cl); 330 + closure_type(dio, struct dio_write, op.cl); 331 331 struct bch_fs *c = dio->op.c; 332 332 333 333 closure_debug_destroy(cl);
+7 -7
fs/bcachefs/io_write.c
··· 580 580 __wp_update_state(wp, state); 581 581 } 582 582 583 - static void bch2_write_index(struct closure *cl) 583 + static CLOSURE_CALLBACK(bch2_write_index) 584 584 { 585 - struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); 585 + closure_type(op, struct bch_write_op, cl); 586 586 struct write_point *wp = op->wp; 587 587 struct workqueue_struct *wq = index_update_wq(op); 588 588 unsigned long flags; ··· 1208 1208 bch2_nocow_write_convert_unwritten(op); 1209 1209 } 1210 1210 1211 - static void bch2_nocow_write_done(struct closure *cl) 1211 + static CLOSURE_CALLBACK(bch2_nocow_write_done) 1212 1212 { 1213 - struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); 1213 + closure_type(op, struct bch_write_op, cl); 1214 1214 1215 1215 __bch2_nocow_write_done(op); 1216 1216 bch2_write_done(cl); ··· 1363 1363 op->insert_keys.top = op->insert_keys.keys; 1364 1364 } else if (op->flags & BCH_WRITE_SYNC) { 1365 1365 closure_sync(&op->cl); 1366 - bch2_nocow_write_done(&op->cl); 1366 + bch2_nocow_write_done(&op->cl.work); 1367 1367 } else { 1368 1368 /* 1369 1369 * XXX ··· 1566 1566 * If op->discard is true, instead of inserting the data it invalidates the 1567 1567 * region of the cache represented by op->bio and op->inode. 1568 1568 */ 1569 - void bch2_write(struct closure *cl) 1569 + CLOSURE_CALLBACK(bch2_write) 1570 1570 { 1571 - struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); 1571 + closure_type(op, struct bch_write_op, cl); 1572 1572 struct bio *bio = &op->wbio.bio; 1573 1573 struct bch_fs *c = op->c; 1574 1574 unsigned data_len;
+1 -2
fs/bcachefs/io_write.h
··· 90 90 op->devs_need_flush = NULL; 91 91 } 92 92 93 - void bch2_write(struct closure *); 94 - 93 + CLOSURE_CALLBACK(bch2_write); 95 94 void bch2_write_point_do_index_updates(struct work_struct *); 96 95 97 96 static inline struct bch_write_bio *wbio_init(struct bio *bio)
+8 -9
fs/bcachefs/journal_io.c
··· 1025 1025 return 0; 1026 1026 } 1027 1027 1028 - static void bch2_journal_read_device(struct closure *cl) 1028 + static CLOSURE_CALLBACK(bch2_journal_read_device) 1029 1029 { 1030 - struct journal_device *ja = 1031 - container_of(cl, struct journal_device, read); 1030 + closure_type(ja, struct journal_device, read); 1032 1031 struct bch_dev *ca = container_of(ja, struct bch_dev, journal); 1033 1032 struct bch_fs *c = ca->fs; 1034 1033 struct journal_list *jlist = ··· 1519 1520 return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK); 1520 1521 } 1521 1522 1522 - static void journal_write_done(struct closure *cl) 1523 + static CLOSURE_CALLBACK(journal_write_done) 1523 1524 { 1524 - struct journal *j = container_of(cl, struct journal, io); 1525 + closure_type(j, struct journal, io); 1525 1526 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1526 1527 struct journal_buf *w = journal_last_unwritten_buf(j); 1527 1528 struct bch_replicas_padded replicas; ··· 1637 1638 percpu_ref_put(&ca->io_ref); 1638 1639 } 1639 1640 1640 - static void do_journal_write(struct closure *cl) 1641 + static CLOSURE_CALLBACK(do_journal_write) 1641 1642 { 1642 - struct journal *j = container_of(cl, struct journal, io); 1643 + closure_type(j, struct journal, io); 1643 1644 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1644 1645 struct bch_dev *ca; 1645 1646 struct journal_buf *w = journal_last_unwritten_buf(j); ··· 1849 1850 return 0; 1850 1851 } 1851 1852 1852 - void bch2_journal_write(struct closure *cl) 1853 + CLOSURE_CALLBACK(bch2_journal_write) 1853 1854 { 1854 - struct journal *j = container_of(cl, struct journal, io); 1855 + closure_type(j, struct journal, io); 1855 1856 struct bch_fs *c = container_of(j, struct bch_fs, journal); 1856 1857 struct bch_dev *ca; 1857 1858 struct journal_buf *w = journal_last_unwritten_buf(j);
+1 -1
fs/bcachefs/journal_io.h
··· 60 60 61 61 int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *); 62 62 63 - void bch2_journal_write(struct closure *); 63 + CLOSURE_CALLBACK(bch2_journal_write); 64 64 65 65 #endif /* _BCACHEFS_JOURNAL_IO_H */
+7 -2
include/linux/closure.h
··· 104 104 105 105 struct closure; 106 106 struct closure_syncer; 107 - typedef void (closure_fn) (struct closure *); 107 + typedef void (closure_fn) (struct work_struct *); 108 108 extern struct dentry *bcache_debug; 109 109 110 110 struct closure_waitlist { ··· 254 254 INIT_WORK(&cl->work, cl->work.func); 255 255 BUG_ON(!queue_work(wq, &cl->work)); 256 256 } else 257 - cl->fn(cl); 257 + cl->fn(&cl->work); 258 258 } 259 259 260 260 /** ··· 308 308 smp_mb(); 309 309 __closure_wake_up(list); 310 310 } 311 + 312 + #define CLOSURE_CALLBACK(name) void name(struct work_struct *ws) 313 + #define closure_type(name, type, member) \ 314 + struct closure *cl = container_of(ws, struct closure, work); \ 315 + type *name = container_of(cl, type, member) 311 316 312 317 /** 313 318 * continue_at - jump to another function with barrier
+3 -2
lib/closure.c
··· 36 36 closure_debug_destroy(cl); 37 37 38 38 if (destructor) 39 - destructor(cl); 39 + destructor(&cl->work); 40 40 41 41 if (parent) 42 42 closure_put(parent); ··· 108 108 int done; 109 109 }; 110 110 111 - static void closure_sync_fn(struct closure *cl) 111 + static CLOSURE_CALLBACK(closure_sync_fn) 112 112 { 113 + struct closure *cl = container_of(ws, struct closure, work); 113 114 struct closure_syncer *s = cl->s; 114 115 struct task_struct *p; 115 116