Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: Heap allocate btree_trans

We're using more stack than we'd like in a number of functions, and
btree_trans is the biggest object that we stack allocate.

But we have to do a heap allocation to initialize it anyway, so
there's no real downside to heap allocating the entire thing.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+731 -877
+15 -18
fs/bcachefs/acl.c
··· 279 279 struct bch_fs *c = inode->v.i_sb->s_fs_info; 280 280 struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); 281 281 struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0); 282 - struct btree_trans trans; 282 + struct btree_trans *trans = bch2_trans_get(c); 283 283 struct btree_iter iter = { NULL }; 284 284 struct bkey_s_c_xattr xattr; 285 285 struct posix_acl *acl = NULL; 286 286 struct bkey_s_c k; 287 287 int ret; 288 - 289 - bch2_trans_init(&trans, c, 0, 0); 290 288 retry: 291 - bch2_trans_begin(&trans); 289 + bch2_trans_begin(trans); 292 290 293 - ret = bch2_hash_lookup(&trans, &iter, bch2_xattr_hash_desc, 291 + ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, 294 292 &hash, inode_inum(inode), &search, 0); 295 293 if (ret) { 296 294 if (!bch2_err_matches(ret, ENOENT)) ··· 304 306 } 305 307 306 308 xattr = bkey_s_c_to_xattr(k); 307 - acl = bch2_acl_from_disk(&trans, xattr_val(xattr.v), 309 + acl = bch2_acl_from_disk(trans, xattr_val(xattr.v), 308 310 le16_to_cpu(xattr.v->x_val_len)); 309 311 310 312 if (!IS_ERR(acl)) ··· 313 315 if (bch2_err_matches(PTR_ERR_OR_ZERO(acl), BCH_ERR_transaction_restart)) 314 316 goto retry; 315 317 316 - bch2_trans_iter_exit(&trans, &iter); 317 - bch2_trans_exit(&trans); 318 + bch2_trans_iter_exit(trans, &iter); 319 + bch2_trans_put(trans); 318 320 return acl; 319 321 } 320 322 ··· 354 356 { 355 357 struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); 356 358 struct bch_fs *c = inode->v.i_sb->s_fs_info; 357 - struct btree_trans trans; 359 + struct btree_trans *trans = bch2_trans_get(c); 358 360 struct btree_iter inode_iter = { NULL }; 359 361 struct bch_inode_unpacked inode_u; 360 362 struct posix_acl *acl; ··· 362 364 int ret; 363 365 364 366 mutex_lock(&inode->ei_update_lock); 365 - bch2_trans_init(&trans, c, 0, 0); 366 367 retry: 367 - bch2_trans_begin(&trans); 368 + bch2_trans_begin(trans); 368 369 acl = _acl; 369 370 370 - ret = bch2_inode_peek(&trans, &inode_iter, 
&inode_u, inode_inum(inode), 371 + ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode), 371 372 BTREE_ITER_INTENT); 372 373 if (ret) 373 374 goto btree_err; ··· 379 382 goto btree_err; 380 383 } 381 384 382 - ret = bch2_set_acl_trans(&trans, inode_inum(inode), &inode_u, acl, type); 385 + ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type); 383 386 if (ret) 384 387 goto btree_err; 385 388 386 389 inode_u.bi_ctime = bch2_current_time(c); 387 390 inode_u.bi_mode = mode; 388 391 389 - ret = bch2_inode_write(&trans, &inode_iter, &inode_u) ?: 390 - bch2_trans_commit(&trans, NULL, NULL, 0); 392 + ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?: 393 + bch2_trans_commit(trans, NULL, NULL, 0); 391 394 btree_err: 392 - bch2_trans_iter_exit(&trans, &inode_iter); 395 + bch2_trans_iter_exit(trans, &inode_iter); 393 396 394 397 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 395 398 goto retry; 396 399 if (unlikely(ret)) 397 400 goto err; 398 401 399 - bch2_inode_update_after_write(&trans, inode, &inode_u, 402 + bch2_inode_update_after_write(trans, inode, &inode_u, 400 403 ATTR_CTIME|ATTR_MODE); 401 404 402 405 set_cached_acl(&inode->v, type, acl); 403 406 err: 404 - bch2_trans_exit(&trans); 405 407 mutex_unlock(&inode->ei_update_lock); 408 + bch2_trans_put(trans); 406 409 407 410 return ret; 408 411 }
+60 -73
fs/bcachefs/alloc_background.c
··· 548 548 549 549 int bch2_bucket_gens_init(struct bch_fs *c) 550 550 { 551 - struct btree_trans trans; 551 + struct btree_trans *trans = bch2_trans_get(c); 552 552 struct btree_iter iter; 553 553 struct bkey_s_c k; 554 554 struct bch_alloc_v4 a; ··· 559 559 u8 gen; 560 560 int ret; 561 561 562 - bch2_trans_init(&trans, c, 0, 0); 563 - 564 - for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN, 562 + for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN, 565 563 BTREE_ITER_PREFETCH, k, ret) { 566 564 /* 567 565 * Not a fsck error because this is checked/repaired by ··· 572 574 pos = alloc_gens_pos(iter.pos, &offset); 573 575 574 576 if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) { 575 - ret = commit_do(&trans, NULL, NULL, 577 + ret = commit_do(trans, NULL, NULL, 576 578 BTREE_INSERT_NOFAIL| 577 579 BTREE_INSERT_LAZY_RW, 578 - bch2_btree_insert_trans(&trans, BTREE_ID_bucket_gens, &g.k_i, 0)); 580 + bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0)); 579 581 if (ret) 580 582 break; 581 583 have_bucket_gens_key = false; ··· 589 591 590 592 g.v.gens[offset] = gen; 591 593 } 592 - bch2_trans_iter_exit(&trans, &iter); 594 + bch2_trans_iter_exit(trans, &iter); 593 595 594 596 if (have_bucket_gens_key && !ret) 595 - ret = commit_do(&trans, NULL, NULL, 597 + ret = commit_do(trans, NULL, NULL, 596 598 BTREE_INSERT_NOFAIL| 597 599 BTREE_INSERT_LAZY_RW, 598 - bch2_btree_insert_trans(&trans, BTREE_ID_bucket_gens, &g.k_i, 0)); 600 + bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0)); 599 601 600 - bch2_trans_exit(&trans); 602 + bch2_trans_put(trans); 601 603 602 604 if (ret) 603 605 bch_err_fn(c, ret); ··· 606 608 607 609 int bch2_alloc_read(struct bch_fs *c) 608 610 { 609 - struct btree_trans trans; 611 + struct btree_trans *trans = bch2_trans_get(c); 610 612 struct btree_iter iter; 611 613 struct bkey_s_c k; 612 614 struct bch_dev *ca; 613 615 int ret; 614 616 615 617 down_read(&c->gc_lock); 616 - bch2_trans_init(&trans, c, 0, 0); 617 
618 618 619 if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) { 619 620 const struct bch_bucket_gens *g; 620 621 u64 b; 621 622 622 - for_each_btree_key(&trans, iter, BTREE_ID_bucket_gens, POS_MIN, 623 + for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN, 623 624 BTREE_ITER_PREFETCH, k, ret) { 624 625 u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset; 625 626 u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset; ··· 642 645 b++) 643 646 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK]; 644 647 } 645 - bch2_trans_iter_exit(&trans, &iter); 648 + bch2_trans_iter_exit(trans, &iter); 646 649 } else { 647 650 struct bch_alloc_v4 a; 648 651 649 - for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN, 652 + for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN, 650 653 BTREE_ITER_PREFETCH, k, ret) { 651 654 /* 652 655 * Not a fsck error because this is checked/repaired by ··· 659 662 660 663 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; 661 664 } 662 - bch2_trans_iter_exit(&trans, &iter); 665 + bch2_trans_iter_exit(trans, &iter); 663 666 } 664 667 665 - bch2_trans_exit(&trans); 668 + bch2_trans_put(trans); 666 669 up_read(&c->gc_lock); 667 670 668 671 if (ret) ··· 1368 1371 1369 1372 int bch2_check_alloc_info(struct bch_fs *c) 1370 1373 { 1371 - struct btree_trans trans; 1374 + struct btree_trans *trans = bch2_trans_get(c); 1372 1375 struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter; 1373 1376 struct bkey hole; 1374 1377 struct bkey_s_c k; 1375 1378 int ret = 0; 1376 1379 1377 - bch2_trans_init(&trans, c, 0, 0); 1378 - 1379 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN, 1380 + bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN, 1380 1381 BTREE_ITER_PREFETCH); 1381 - bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN, 1382 + bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN, 1382 1383 
BTREE_ITER_PREFETCH); 1383 - bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN, 1384 + bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN, 1384 1385 BTREE_ITER_PREFETCH); 1385 - bch2_trans_iter_init(&trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN, 1386 + bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN, 1386 1387 BTREE_ITER_PREFETCH); 1387 1388 1388 1389 while (1) { 1389 1390 struct bpos next; 1390 1391 1391 - bch2_trans_begin(&trans); 1392 + bch2_trans_begin(trans); 1392 1393 1393 1394 k = bch2_get_key_or_real_bucket_hole(&iter, &hole); 1394 1395 ret = bkey_err(k); ··· 1399 1404 if (k.k->type) { 1400 1405 next = bpos_nosnap_successor(k.k->p); 1401 1406 1402 - ret = bch2_check_alloc_key(&trans, 1407 + ret = bch2_check_alloc_key(trans, 1403 1408 k, &iter, 1404 1409 &discard_iter, 1405 1410 &freespace_iter, ··· 1409 1414 } else { 1410 1415 next = k.k->p; 1411 1416 1412 - ret = bch2_check_alloc_hole_freespace(&trans, 1417 + ret = bch2_check_alloc_hole_freespace(trans, 1413 1418 bkey_start_pos(k.k), 1414 1419 &next, 1415 1420 &freespace_iter) ?: 1416 - bch2_check_alloc_hole_bucket_gens(&trans, 1421 + bch2_check_alloc_hole_bucket_gens(trans, 1417 1422 bkey_start_pos(k.k), 1418 1423 &next, 1419 1424 &bucket_gens_iter); ··· 1421 1426 goto bkey_err; 1422 1427 } 1423 1428 1424 - ret = bch2_trans_commit(&trans, NULL, NULL, 1429 + ret = bch2_trans_commit(trans, NULL, NULL, 1425 1430 BTREE_INSERT_NOFAIL| 1426 1431 BTREE_INSERT_LAZY_RW); 1427 1432 if (ret) ··· 1434 1439 if (ret) 1435 1440 break; 1436 1441 } 1437 - bch2_trans_iter_exit(&trans, &bucket_gens_iter); 1438 - bch2_trans_iter_exit(&trans, &freespace_iter); 1439 - bch2_trans_iter_exit(&trans, &discard_iter); 1440 - bch2_trans_iter_exit(&trans, &iter); 1442 + bch2_trans_iter_exit(trans, &bucket_gens_iter); 1443 + bch2_trans_iter_exit(trans, &freespace_iter); 1444 + bch2_trans_iter_exit(trans, &discard_iter); 1445 + 
bch2_trans_iter_exit(trans, &iter); 1441 1446 1442 1447 if (ret < 0) 1443 1448 goto err; 1444 1449 1445 - ret = for_each_btree_key2(&trans, iter, 1450 + ret = for_each_btree_key2(trans, iter, 1446 1451 BTREE_ID_need_discard, POS_MIN, 1447 1452 BTREE_ITER_PREFETCH, k, 1448 - bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?: 1449 - for_each_btree_key2(&trans, iter, 1453 + bch2_check_discard_freespace_key(trans, &iter, k.k->p)) ?: 1454 + for_each_btree_key2(trans, iter, 1450 1455 BTREE_ID_freespace, POS_MIN, 1451 1456 BTREE_ITER_PREFETCH, k, 1452 - bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?: 1453 - for_each_btree_key_commit(&trans, iter, 1457 + bch2_check_discard_freespace_key(trans, &iter, k.k->p)) ?: 1458 + for_each_btree_key_commit(trans, iter, 1454 1459 BTREE_ID_bucket_gens, POS_MIN, 1455 1460 BTREE_ITER_PREFETCH, k, 1456 1461 NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, 1457 - bch2_check_bucket_gens_key(&trans, &iter, k)); 1462 + bch2_check_bucket_gens_key(trans, &iter, k)); 1458 1463 err: 1459 - bch2_trans_exit(&trans); 1464 + bch2_trans_put(trans); 1460 1465 if (ret) 1461 1466 bch_err_fn(c, ret); 1462 1467 return ret; ··· 1542 1547 int ret = 0; 1543 1548 1544 1549 ret = bch2_trans_run(c, 1545 - for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc, 1550 + for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, 1546 1551 POS_MIN, BTREE_ITER_PREFETCH, k, 1547 1552 NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, 1548 - bch2_check_alloc_to_lru_ref(&trans, &iter))); 1553 + bch2_check_alloc_to_lru_ref(trans, &iter))); 1549 1554 if (ret) 1550 1555 bch_err_fn(c, ret); 1551 1556 return ret; ··· 1670 1675 static void bch2_do_discards_work(struct work_struct *work) 1671 1676 { 1672 1677 struct bch_fs *c = container_of(work, struct bch_fs, discard_work); 1673 - struct btree_trans trans; 1674 1678 struct btree_iter iter; 1675 1679 struct bkey_s_c k; 1676 1680 u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0; 1677 1681 
struct bpos discard_pos_done = POS_MAX; 1678 1682 int ret; 1679 1683 1680 - bch2_trans_init(&trans, c, 0, 0); 1681 - 1682 1684 /* 1683 1685 * We're doing the commit in bch2_discard_one_bucket instead of using 1684 1686 * for_each_btree_key_commit() so that we can increment counters after 1685 1687 * successful commit: 1686 1688 */ 1687 - ret = for_each_btree_key2(&trans, iter, 1688 - BTREE_ID_need_discard, POS_MIN, 0, k, 1689 - bch2_discard_one_bucket(&trans, &iter, &discard_pos_done, 1690 - &seen, 1691 - &open, 1692 - &need_journal_commit, 1693 - &discarded)); 1694 - 1695 - bch2_trans_exit(&trans); 1689 + ret = bch2_trans_run(c, 1690 + for_each_btree_key2(trans, iter, 1691 + BTREE_ID_need_discard, POS_MIN, 0, k, 1692 + bch2_discard_one_bucket(trans, &iter, &discard_pos_done, 1693 + &seen, 1694 + &open, 1695 + &need_journal_commit, 1696 + &discarded))); 1696 1697 1697 1698 if (need_journal_commit * 2 > seen) 1698 1699 bch2_journal_flush_async(&c->journal, NULL); ··· 1794 1803 { 1795 1804 struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work); 1796 1805 struct bch_dev *ca; 1797 - struct btree_trans trans; 1806 + struct btree_trans *trans = bch2_trans_get(c); 1798 1807 struct btree_iter iter; 1799 1808 struct bkey_s_c k; 1800 1809 unsigned i; 1801 1810 int ret = 0; 1802 1811 1803 - bch2_trans_init(&trans, c, 0, 0); 1804 - 1805 - ret = bch2_btree_write_buffer_flush(&trans); 1812 + ret = bch2_btree_write_buffer_flush(trans); 1806 1813 if (ret) 1807 1814 goto err; 1808 1815 ··· 1808 1819 s64 nr_to_invalidate = 1809 1820 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); 1810 1821 1811 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_lru, 1822 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru, 1812 1823 lru_pos(ca->dev_idx, 0, 0), 1813 1824 lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX), 1814 1825 BTREE_ITER_INTENT, k, 1815 - invalidate_one_bucket(&trans, &iter, k, &nr_to_invalidate)); 1826 + invalidate_one_bucket(trans, &iter, k, 
&nr_to_invalidate)); 1816 1827 1817 1828 if (ret < 0) { 1818 1829 percpu_ref_put(&ca->ref); ··· 1820 1831 } 1821 1832 } 1822 1833 err: 1823 - bch2_trans_exit(&trans); 1834 + bch2_trans_put(trans); 1824 1835 bch2_write_ref_put(c, BCH_WRITE_REF_invalidate); 1825 1836 } 1826 1837 ··· 1834 1845 static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, 1835 1846 unsigned long *last_updated) 1836 1847 { 1837 - struct btree_trans trans; 1848 + struct btree_trans *trans = bch2_trans_get(c); 1838 1849 struct btree_iter iter; 1839 1850 struct bkey_s_c k; 1840 1851 struct bkey hole; ··· 1842 1853 struct bch_member *m; 1843 1854 int ret; 1844 1855 1845 - bch2_trans_init(&trans, c, 0, 0); 1846 - 1847 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, 1856 + bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, 1848 1857 POS(ca->dev_idx, ca->mi.first_bucket), 1849 1858 BTREE_ITER_PREFETCH); 1850 1859 /* ··· 1856 1869 *last_updated = jiffies; 1857 1870 } 1858 1871 1859 - bch2_trans_begin(&trans); 1872 + bch2_trans_begin(trans); 1860 1873 1861 1874 if (bkey_ge(iter.pos, end)) { 1862 1875 ret = 0; ··· 1876 1889 struct bch_alloc_v4 a_convert; 1877 1890 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert); 1878 1891 1879 - ret = bch2_bucket_do_index(&trans, k, a, true) ?: 1880 - bch2_trans_commit(&trans, NULL, NULL, 1892 + ret = bch2_bucket_do_index(trans, k, a, true) ?: 1893 + bch2_trans_commit(trans, NULL, NULL, 1881 1894 BTREE_INSERT_LAZY_RW| 1882 1895 BTREE_INSERT_NOFAIL); 1883 1896 if (ret) ··· 1887 1900 } else { 1888 1901 struct bkey_i *freespace; 1889 1902 1890 - freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace)); 1903 + freespace = bch2_trans_kmalloc(trans, sizeof(*freespace)); 1891 1904 ret = PTR_ERR_OR_ZERO(freespace); 1892 1905 if (ret) 1893 1906 goto bkey_err; ··· 1897 1910 freespace->k.p = k.k->p; 1898 1911 freespace->k.size = k.k->size; 1899 1912 1900 - ret = bch2_btree_insert_trans(&trans, BTREE_ID_freespace, freespace, 0) ?: 1901 - 
bch2_trans_commit(&trans, NULL, NULL, 1913 + ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?: 1914 + bch2_trans_commit(trans, NULL, NULL, 1902 1915 BTREE_INSERT_LAZY_RW| 1903 1916 BTREE_INSERT_NOFAIL); 1904 1917 if (ret) ··· 1913 1926 break; 1914 1927 } 1915 1928 1916 - bch2_trans_iter_exit(&trans, &iter); 1917 - bch2_trans_exit(&trans); 1929 + bch2_trans_iter_exit(trans, &iter); 1930 + bch2_trans_put(trans); 1918 1931 1919 1932 if (ret < 0) { 1920 1933 bch_err_msg(ca, ret, "initializing free space");
+1 -1
fs/bcachefs/alloc_foreground.c
··· 602 602 struct open_bucket *ob; 603 603 604 604 bch2_trans_do(c, NULL, NULL, 0, 605 - PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, watermark, 605 + PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark, 606 606 cl, &usage))); 607 607 return ob; 608 608 }
+10 -12
fs/bcachefs/backpointers.c
··· 390 390 int ret; 391 391 392 392 ret = bch2_trans_run(c, 393 - for_each_btree_key_commit(&trans, iter, 393 + for_each_btree_key_commit(trans, iter, 394 394 BTREE_ID_backpointers, POS_MIN, 0, k, 395 395 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 396 - bch2_check_btree_backpointer(&trans, &iter, k))); 396 + bch2_check_btree_backpointer(trans, &iter, k))); 397 397 if (ret) 398 398 bch_err_fn(c, ret); 399 399 return ret; ··· 723 723 724 724 int bch2_check_extents_to_backpointers(struct bch_fs *c) 725 725 { 726 - struct btree_trans trans; 726 + struct btree_trans *trans = bch2_trans_get(c); 727 727 struct bpos start = POS_MIN, end; 728 728 int ret; 729 729 730 - bch2_trans_init(&trans, c, 0, 0); 731 730 while (1) { 732 - ret = bch2_get_alloc_in_memory_pos(&trans, start, &end); 731 + ret = bch2_get_alloc_in_memory_pos(trans, start, &end); 733 732 if (ret) 734 733 break; 735 734 ··· 748 749 printbuf_exit(&buf); 749 750 } 750 751 751 - ret = bch2_check_extents_to_backpointers_pass(&trans, start, end); 752 + ret = bch2_check_extents_to_backpointers_pass(trans, start, end); 752 753 if (ret || bpos_eq(end, SPOS_MAX)) 753 754 break; 754 755 755 756 start = bpos_successor(end); 756 757 } 757 - bch2_trans_exit(&trans); 758 + bch2_trans_put(trans); 758 759 759 760 if (ret) 760 761 bch_err_fn(c, ret); ··· 823 824 824 825 int bch2_check_backpointers_to_extents(struct bch_fs *c) 825 826 { 826 - struct btree_trans trans; 827 + struct btree_trans *trans = bch2_trans_get(c); 827 828 struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end; 828 829 int ret; 829 830 830 - bch2_trans_init(&trans, c, 0, 0); 831 831 while (1) { 832 - ret = bch2_get_btree_in_memory_pos(&trans, 832 + ret = bch2_get_btree_in_memory_pos(trans, 833 833 (1U << BTREE_ID_extents)| 834 834 (1U << BTREE_ID_reflink), 835 835 ~0, ··· 854 856 printbuf_exit(&buf); 855 857 } 856 858 857 - ret = bch2_check_backpointers_to_extents_pass(&trans, start, end); 859 + ret = 
bch2_check_backpointers_to_extents_pass(trans, start, end); 858 860 if (ret || !bbpos_cmp(end, BBPOS_MAX)) 859 861 break; 860 862 861 863 start = bbpos_successor(end); 862 864 } 863 - bch2_trans_exit(&trans); 865 + bch2_trans_put(trans); 864 866 865 867 if (ret) 866 868 bch_err_fn(c, ret);
+4 -4
fs/bcachefs/bcachefs.h
··· 627 627 size_t size; 628 628 }; 629 629 630 - struct btree_path_buf { 631 - struct btree_path *path; 630 + struct btree_trans_buf { 631 + struct btree_trans *trans; 632 632 }; 633 633 634 634 #define REPLICAS_DELTA_LIST_MAX (1U << 16) ··· 787 787 /* btree_iter.c: */ 788 788 struct seqmutex btree_trans_lock; 789 789 struct list_head btree_trans_list; 790 - mempool_t btree_paths_pool; 790 + mempool_t btree_trans_pool; 791 791 mempool_t btree_trans_mem_pool; 792 - struct btree_path_buf __percpu *btree_paths_bufs; 792 + struct btree_trans_buf __percpu *btree_trans_bufs; 793 793 794 794 struct srcu_struct btree_trans_barrier; 795 795 bool btree_trans_barrier_initialized;
+43 -52
fs/bcachefs/btree_gc.c
··· 529 529 530 530 int bch2_check_topology(struct bch_fs *c) 531 531 { 532 - struct btree_trans trans; 532 + struct btree_trans *trans = bch2_trans_get(c); 533 533 struct btree *b; 534 534 unsigned i; 535 535 int ret = 0; 536 - 537 - bch2_trans_init(&trans, c, 0, 0); 538 536 539 537 for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) { 540 538 struct btree_root *r = bch2_btree_id_root(c, i); ··· 544 546 if (btree_node_fake(b)) 545 547 continue; 546 548 547 - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); 548 - ret = bch2_btree_repair_topology_recurse(&trans, b); 549 + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); 550 + ret = bch2_btree_repair_topology_recurse(trans, b); 549 551 six_unlock_read(&b->c.lock); 550 552 551 553 if (ret == DROP_THIS_NODE) { ··· 554 556 } 555 557 } 556 558 557 - bch2_trans_exit(&trans); 559 + bch2_trans_put(trans); 558 560 559 561 return ret; 560 562 } ··· 1066 1068 1067 1069 static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only) 1068 1070 { 1069 - struct btree_trans trans; 1071 + struct btree_trans *trans = bch2_trans_get(c); 1070 1072 enum btree_id ids[BTREE_ID_NR]; 1071 1073 unsigned i; 1072 1074 int ret = 0; 1073 - 1074 - bch2_trans_init(&trans, c, 0, 0); 1075 1075 1076 1076 for (i = 0; i < BTREE_ID_NR; i++) 1077 1077 ids[i] = i; ··· 1077 1081 1078 1082 for (i = 0; i < BTREE_ID_NR && !ret; i++) 1079 1083 ret = initial 1080 - ? bch2_gc_btree_init(&trans, ids[i], metadata_only) 1081 - : bch2_gc_btree(&trans, ids[i], initial, metadata_only); 1084 + ? bch2_gc_btree_init(trans, ids[i], metadata_only) 1085 + : bch2_gc_btree(trans, ids[i], initial, metadata_only); 1082 1086 1083 1087 for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) { 1084 1088 if (!bch2_btree_id_root(c, i)->alive) 1085 1089 continue; 1086 1090 1087 1091 ret = initial 1088 - ? bch2_gc_btree_init(&trans, i, metadata_only) 1089 - : bch2_gc_btree(&trans, i, initial, metadata_only); 1092 + ? 
bch2_gc_btree_init(trans, i, metadata_only) 1093 + : bch2_gc_btree(trans, i, initial, metadata_only); 1090 1094 } 1091 1095 1092 1096 if (ret < 0) 1093 1097 bch_err_fn(c, ret); 1094 1098 1095 - bch2_trans_exit(&trans); 1099 + bch2_trans_put(trans); 1096 1100 return ret; 1097 1101 } 1098 1102 ··· 1454 1458 1455 1459 static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only) 1456 1460 { 1457 - struct btree_trans trans; 1461 + struct btree_trans *trans = bch2_trans_get(c); 1458 1462 struct btree_iter iter; 1459 1463 struct bkey_s_c k; 1460 1464 struct bch_dev *ca; 1461 1465 unsigned i; 1462 1466 int ret = 0; 1463 1467 1464 - bch2_trans_init(&trans, c, 0, 0); 1465 - 1466 1468 for_each_member_device(ca, c, i) { 1467 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc, 1469 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, 1468 1470 POS(ca->dev_idx, ca->mi.first_bucket), 1469 1471 BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k, 1470 1472 NULL, NULL, BTREE_INSERT_LAZY_RW, 1471 - bch2_alloc_write_key(&trans, &iter, k, metadata_only)); 1473 + bch2_alloc_write_key(trans, &iter, k, metadata_only)); 1472 1474 1473 1475 if (ret < 0) { 1474 1476 bch_err_fn(c, ret); ··· 1475 1481 } 1476 1482 } 1477 1483 1478 - bch2_trans_exit(&trans); 1484 + bch2_trans_put(trans); 1479 1485 return ret < 0 ? 
ret : 0; 1480 1486 } 1481 1487 1482 1488 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only) 1483 1489 { 1484 1490 struct bch_dev *ca; 1485 - struct btree_trans trans; 1491 + struct btree_trans *trans = bch2_trans_get(c); 1486 1492 struct btree_iter iter; 1487 1493 struct bkey_s_c k; 1488 1494 struct bucket *g; ··· 1498 1504 if (!buckets) { 1499 1505 percpu_ref_put(&ca->ref); 1500 1506 bch_err(c, "error allocating ca->buckets[gc]"); 1501 - return -BCH_ERR_ENOMEM_gc_alloc_start; 1507 + ret = -BCH_ERR_ENOMEM_gc_alloc_start; 1508 + goto err; 1502 1509 } 1503 1510 1504 1511 buckets->first_bucket = ca->mi.first_bucket; ··· 1507 1512 rcu_assign_pointer(ca->buckets_gc, buckets); 1508 1513 } 1509 1514 1510 - bch2_trans_init(&trans, c, 0, 0); 1511 - 1512 - for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN, 1515 + for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN, 1513 1516 BTREE_ITER_PREFETCH, k, ret) { 1514 1517 ca = bch_dev_bkey_exists(c, k.k->p.inode); 1515 1518 g = gc_bucket(ca, k.k->p.offset); ··· 1528 1535 g->stripe_redundancy = a->stripe_redundancy; 1529 1536 } 1530 1537 } 1531 - bch2_trans_iter_exit(&trans, &iter); 1532 - 1533 - bch2_trans_exit(&trans); 1534 - 1538 + bch2_trans_iter_exit(trans, &iter); 1539 + err: 1540 + bch2_trans_put(trans); 1535 1541 if (ret) 1536 1542 bch_err_fn(c, ret); 1537 - 1538 1543 return ret; 1539 1544 } 1540 1545 ··· 1607 1616 1608 1617 static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only) 1609 1618 { 1610 - struct btree_trans trans; 1619 + struct btree_trans *trans; 1611 1620 struct btree_iter iter; 1612 1621 struct bkey_s_c k; 1613 1622 size_t idx = 0; ··· 1616 1625 if (metadata_only) 1617 1626 return 0; 1618 1627 1619 - bch2_trans_init(&trans, c, 0, 0); 1628 + trans = bch2_trans_get(c); 1620 1629 1621 - ret = for_each_btree_key_commit(&trans, iter, 1630 + ret = for_each_btree_key_commit(trans, iter, 1622 1631 BTREE_ID_reflink, POS_MIN, 1623 1632 BTREE_ITER_PREFETCH, k, 1624 1633 NULL, 
NULL, BTREE_INSERT_NOFAIL, 1625 - bch2_gc_write_reflink_key(&trans, &iter, k, &idx)); 1634 + bch2_gc_write_reflink_key(trans, &iter, k, &idx)); 1626 1635 1627 1636 c->reflink_gc_nr = 0; 1628 - bch2_trans_exit(&trans); 1637 + bch2_trans_put(trans); 1629 1638 return ret; 1630 1639 } 1631 1640 1632 1641 static int bch2_gc_reflink_start(struct bch_fs *c, 1633 1642 bool metadata_only) 1634 1643 { 1635 - struct btree_trans trans; 1644 + struct btree_trans *trans; 1636 1645 struct btree_iter iter; 1637 1646 struct bkey_s_c k; 1638 1647 struct reflink_gc *r; ··· 1641 1650 if (metadata_only) 1642 1651 return 0; 1643 1652 1644 - bch2_trans_init(&trans, c, 0, 0); 1653 + trans = bch2_trans_get(c); 1645 1654 c->reflink_gc_nr = 0; 1646 1655 1647 - for_each_btree_key(&trans, iter, BTREE_ID_reflink, POS_MIN, 1656 + for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN, 1648 1657 BTREE_ITER_PREFETCH, k, ret) { 1649 1658 const __le64 *refcount = bkey_refcount_c(k); 1650 1659 ··· 1662 1671 r->size = k.k->size; 1663 1672 r->refcount = 0; 1664 1673 } 1665 - bch2_trans_iter_exit(&trans, &iter); 1674 + bch2_trans_iter_exit(trans, &iter); 1666 1675 1667 - bch2_trans_exit(&trans); 1676 + bch2_trans_put(trans); 1668 1677 return ret; 1669 1678 } 1670 1679 ··· 1731 1740 1732 1741 static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only) 1733 1742 { 1734 - struct btree_trans trans; 1743 + struct btree_trans *trans; 1735 1744 struct btree_iter iter; 1736 1745 struct bkey_s_c k; 1737 1746 int ret = 0; ··· 1739 1748 if (metadata_only) 1740 1749 return 0; 1741 1750 1742 - bch2_trans_init(&trans, c, 0, 0); 1751 + trans = bch2_trans_get(c); 1743 1752 1744 - ret = for_each_btree_key_commit(&trans, iter, 1753 + ret = for_each_btree_key_commit(trans, iter, 1745 1754 BTREE_ID_stripes, POS_MIN, 1746 1755 BTREE_ITER_PREFETCH, k, 1747 1756 NULL, NULL, BTREE_INSERT_NOFAIL, 1748 - bch2_gc_write_stripes_key(&trans, &iter, k)); 1757 + bch2_gc_write_stripes_key(trans, &iter, k)); 1749 1758 1750 
- bch2_trans_exit(&trans); 1759 + bch2_trans_put(trans); 1751 1760 return ret; 1752 1761 } 1753 1762 ··· 1933 1942 1934 1943 int bch2_gc_gens(struct bch_fs *c) 1935 1944 { 1936 - struct btree_trans trans; 1945 + struct btree_trans *trans; 1937 1946 struct btree_iter iter; 1938 1947 struct bkey_s_c k; 1939 1948 struct bch_dev *ca; ··· 1951 1960 1952 1961 trace_and_count(c, gc_gens_start, c); 1953 1962 down_read(&c->gc_lock); 1954 - bch2_trans_init(&trans, c, 0, 0); 1963 + trans = bch2_trans_get(c); 1955 1964 1956 1965 for_each_member_device(ca, c, i) { 1957 1966 struct bucket_gens *gens; ··· 1977 1986 c->gc_gens_btree = i; 1978 1987 c->gc_gens_pos = POS_MIN; 1979 1988 1980 - ret = for_each_btree_key_commit(&trans, iter, i, 1989 + ret = for_each_btree_key_commit(trans, iter, i, 1981 1990 POS_MIN, 1982 1991 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, 1983 1992 k, 1984 1993 NULL, NULL, 1985 1994 BTREE_INSERT_NOFAIL, 1986 - gc_btree_gens_key(&trans, &iter, k)); 1995 + gc_btree_gens_key(trans, &iter, k)); 1987 1996 if (ret && !bch2_err_matches(ret, EROFS)) 1988 1997 bch_err_fn(c, ret); 1989 1998 if (ret) 1990 1999 goto err; 1991 2000 } 1992 2001 1993 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc, 2002 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, 1994 2003 POS_MIN, 1995 2004 BTREE_ITER_PREFETCH, 1996 2005 k, 1997 2006 NULL, NULL, 1998 2007 BTREE_INSERT_NOFAIL, 1999 - bch2_alloc_write_oldest_gen(&trans, &iter, k)); 2008 + bch2_alloc_write_oldest_gen(trans, &iter, k)); 2000 2009 if (ret && !bch2_err_matches(ret, EROFS)) 2001 2010 bch_err_fn(c, ret); 2002 2011 if (ret) ··· 2015 2024 ca->oldest_gen = NULL; 2016 2025 } 2017 2026 2018 - bch2_trans_exit(&trans); 2027 + bch2_trans_put(trans); 2019 2028 up_read(&c->gc_lock); 2020 2029 mutex_unlock(&c->gc_gens_lock); 2021 2030 return ret;
+5 -8
fs/bcachefs/btree_io.c
··· 1628 1628 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id, 1629 1629 const struct bkey_i *k, unsigned level) 1630 1630 { 1631 - return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level)); 1632 - 1631 + return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level)); 1633 1632 } 1634 1633 1635 1634 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b, ··· 1690 1691 1691 1692 static void btree_node_write_done(struct bch_fs *c, struct btree *b) 1692 1693 { 1693 - struct btree_trans trans; 1694 + struct btree_trans *trans = bch2_trans_get(c); 1694 1695 1695 - bch2_trans_init(&trans, c, 0, 0); 1696 - 1697 - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); 1696 + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); 1698 1697 __btree_node_write_done(c, b); 1699 1698 six_unlock_read(&b->c.lock); 1700 1699 1701 - bch2_trans_exit(&trans); 1700 + bch2_trans_put(trans); 1702 1701 } 1703 1702 1704 1703 static void btree_node_write_work(struct work_struct *work) ··· 1725 1728 } 1726 1729 } else { 1727 1730 ret = bch2_trans_do(c, NULL, NULL, 0, 1728 - bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key, 1731 + bch2_btree_node_update_key_get_iter(trans, b, &wbio->key, 1729 1732 BCH_WATERMARK_reclaim| 1730 1733 BTREE_INSERT_JOURNAL_RECLAIM| 1731 1734 BTREE_INSERT_NOFAIL|
+43 -39
fs/bcachefs/btree_iter.c
··· 2906 2906 return trans->restart_count; 2907 2907 } 2908 2908 2909 - static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c) 2909 + static struct btree_trans *bch2_trans_alloc(struct bch_fs *c) 2910 2910 { 2911 - size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX; 2912 - size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX; 2913 - void *p = NULL; 2911 + struct btree_trans *trans; 2914 2912 2915 - BUG_ON(trans->used_mempool); 2916 - 2917 - #ifdef __KERNEL__ 2918 - p = this_cpu_xchg(c->btree_paths_bufs->path, NULL); 2919 - #endif 2920 - if (!p) { 2921 - p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS); 2922 - /* 2923 - * paths need to be zeroed, bch2_check_for_deadlock looks at 2924 - * paths in other threads 2925 - */ 2926 - memset(p, 0, paths_bytes); 2913 + if (IS_ENABLED(__KERNEL__)) { 2914 + trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL); 2915 + if (trans) 2916 + return trans; 2927 2917 } 2928 2918 2929 - trans->paths = p; p += paths_bytes; 2930 - trans->updates = p; p += updates_bytes; 2919 + trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS); 2920 + /* 2921 + * paths need to be zeroed, bch2_check_for_deadlock looks at 2922 + * paths in other threads 2923 + */ 2924 + memset(&trans->paths, 0, sizeof(trans->paths)); 2925 + return trans; 2931 2926 } 2932 2927 2933 2928 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR]; ··· 2942 2947 return i; 2943 2948 } 2944 2949 2945 - void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx) 2950 + struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx) 2946 2951 __acquires(&c->btree_trans_barrier) 2947 2952 { 2953 + struct btree_trans *trans; 2948 2954 struct btree_transaction_stats *s; 2955 + 2956 + trans = bch2_trans_alloc(c); 2949 2957 2950 2958 memset(trans, 0, sizeof(*trans)); 2951 2959 trans->c = c; ··· 2960 2962 trans->journal_replay_not_finished = 2961 2963 !test_bit(JOURNAL_REPLAY_DONE, 
&c->journal.flags); 2962 2964 closure_init_stack(&trans->ref); 2963 - 2964 - bch2_trans_alloc_paths(trans, c); 2965 2965 2966 2966 s = btree_trans_stats(trans); 2967 2967 if (s && s->max_mem) { ··· 3006 3010 list_add_done: 3007 3011 seqmutex_unlock(&c->btree_trans_lock); 3008 3012 } 3013 + 3014 + return trans; 3009 3015 } 3010 3016 3011 3017 static void check_btree_paths_leaked(struct btree_trans *trans) ··· 3032 3034 #endif 3033 3035 } 3034 3036 3035 - void bch2_trans_exit(struct btree_trans *trans) 3037 + void bch2_trans_put(struct btree_trans *trans) 3036 3038 __releases(&c->btree_trans_barrier) 3037 3039 { 3038 3040 struct btree_insert_entry *i; ··· 3078 3080 else 3079 3081 kfree(trans->mem); 3080 3082 3081 - #ifdef __KERNEL__ 3082 - /* 3083 - * Userspace doesn't have a real percpu implementation: 3084 - */ 3085 - trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths); 3086 - #endif 3087 - 3088 - if (trans->paths) 3089 - mempool_free(trans->paths, &c->btree_paths_pool); 3090 - 3091 - trans->mem = (void *) 0x1; 3092 - trans->paths = (void *) 0x1; 3083 + /* Userspace doesn't have a real percpu implementation: */ 3084 + if (IS_ENABLED(__KERNEL__)) 3085 + trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans); 3086 + if (trans) 3087 + mempool_free(trans, &c->btree_trans_pool); 3093 3088 } 3094 3089 3095 3090 static void __maybe_unused ··· 3160 3169 void bch2_fs_btree_iter_exit(struct bch_fs *c) 3161 3170 { 3162 3171 struct btree_transaction_stats *s; 3172 + struct btree_trans *trans; 3173 + int cpu; 3174 + 3175 + trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list); 3176 + if (trans) 3177 + panic("%s leaked btree_trans\n", trans->fn); 3178 + 3179 + if (c->btree_trans_bufs) 3180 + for_each_possible_cpu(cpu) 3181 + kfree(per_cpu_ptr(c->btree_trans_bufs, cpu)->trans); 3182 + free_percpu(c->btree_trans_bufs); 3163 3183 3164 3184 for (s = c->btree_transaction_stats; 3165 3185 s < c->btree_transaction_stats + 
ARRAY_SIZE(c->btree_transaction_stats); ··· 3182 3180 if (c->btree_trans_barrier_initialized) 3183 3181 cleanup_srcu_struct(&c->btree_trans_barrier); 3184 3182 mempool_exit(&c->btree_trans_mem_pool); 3185 - mempool_exit(&c->btree_paths_pool); 3183 + mempool_exit(&c->btree_trans_pool); 3186 3184 } 3187 3185 3188 3186 int bch2_fs_btree_iter_init(struct bch_fs *c) 3189 3187 { 3190 3188 struct btree_transaction_stats *s; 3191 - unsigned nr = BTREE_ITER_MAX; 3192 3189 int ret; 3193 3190 3194 3191 for (s = c->btree_transaction_stats; ··· 3200 3199 INIT_LIST_HEAD(&c->btree_trans_list); 3201 3200 seqmutex_init(&c->btree_trans_lock); 3202 3201 3203 - ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1, 3204 - sizeof(struct btree_path) * nr + 3205 - sizeof(struct btree_insert_entry) * nr) ?: 3202 + c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf); 3203 + if (!c->btree_trans_bufs) 3204 + return -ENOMEM; 3205 + 3206 + ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1, 3207 + sizeof(struct btree_trans)) ?: 3206 3208 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1, 3207 3209 BTREE_TRANS_MEM_MAX) ?: 3208 3210 init_srcu_struct(&c->btree_trans_barrier);
+7 -7
fs/bcachefs/btree_iter.h
··· 915 915 void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *); 916 916 void bch2_dump_trans_updates(struct btree_trans *); 917 917 void bch2_dump_trans_paths_updates(struct btree_trans *); 918 - void __bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned); 919 - void bch2_trans_exit(struct btree_trans *); 918 + 919 + struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned); 920 + void bch2_trans_put(struct btree_trans *); 920 921 921 922 extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR]; 922 923 unsigned bch2_trans_get_fn_idx(const char *); 923 924 924 - #define bch2_trans_init(_trans, _c, _nr_iters, _mem) \ 925 - do { \ 925 + #define bch2_trans_get(_c) \ 926 + ({ \ 926 927 static unsigned trans_fn_idx; \ 927 928 \ 928 929 if (unlikely(!trans_fn_idx)) \ 929 930 trans_fn_idx = bch2_trans_get_fn_idx(__func__); \ 930 - \ 931 - __bch2_trans_init(_trans, _c, trans_fn_idx); \ 932 - } while (0) 931 + __bch2_trans_get(_c, trans_fn_idx); \ 932 + }) 933 933 934 934 void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *); 935 935
+5 -7
fs/bcachefs/btree_key_cache.c
··· 704 704 struct bkey_cached *ck = 705 705 container_of(pin, struct bkey_cached, journal); 706 706 struct bkey_cached_key key; 707 - struct btree_trans trans; 707 + struct btree_trans *trans = bch2_trans_get(c); 708 708 int srcu_idx = srcu_read_lock(&c->btree_trans_barrier); 709 709 int ret = 0; 710 710 711 - bch2_trans_init(&trans, c, 0, 0); 712 - 713 - btree_node_lock_nopath_nofail(&trans, &ck->c, SIX_LOCK_read); 711 + btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read); 714 712 key = ck->key; 715 713 716 714 if (ck->journal.seq != seq || ··· 725 727 } 726 728 six_unlock_read(&ck->c.lock); 727 729 728 - ret = commit_do(&trans, NULL, NULL, 0, 729 - btree_key_cache_flush_pos(&trans, key, seq, 730 + ret = commit_do(trans, NULL, NULL, 0, 731 + btree_key_cache_flush_pos(trans, key, seq, 730 732 BTREE_INSERT_JOURNAL_RECLAIM, false)); 731 733 unlock: 732 734 srcu_read_unlock(&c->btree_trans_barrier, srcu_idx); 733 735 734 - bch2_trans_exit(&trans); 736 + bch2_trans_put(trans); 735 737 return ret; 736 738 } 737 739
+3 -5
fs/bcachefs/btree_trans_commit.c
··· 163 163 struct bch_fs *c = container_of(j, struct bch_fs, journal); 164 164 struct btree_write *w = container_of(pin, struct btree_write, journal); 165 165 struct btree *b = container_of(w, struct btree, writes[i]); 166 - struct btree_trans trans; 166 + struct btree_trans *trans = bch2_trans_get(c); 167 167 unsigned long old, new, v; 168 168 unsigned idx = w - b->writes; 169 169 170 - bch2_trans_init(&trans, c, 0, 0); 171 - 172 - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); 170 + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); 173 171 v = READ_ONCE(b->flags); 174 172 175 173 do { ··· 186 188 btree_node_write_if_need(c, b, SIX_LOCK_read); 187 189 six_unlock_read(&b->c.lock); 188 190 189 - bch2_trans_exit(&trans); 191 + bch2_trans_put(trans); 190 192 return 0; 191 193 } 192 194
+2 -2
fs/bcachefs/btree_types.h
··· 452 452 void *mem; 453 453 454 454 u8 sorted[BTREE_ITER_MAX + 8]; 455 - struct btree_path *paths; 456 - struct btree_insert_entry *updates; 455 + struct btree_path paths[BTREE_ITER_MAX]; 456 + struct btree_insert_entry updates[BTREE_ITER_MAX]; 457 457 struct btree_write_buffered_key *wb_updates; 458 458 459 459 /* update path: */
+3 -3
fs/bcachefs/btree_update.c
··· 692 692 struct disk_reservation *disk_res, int flags) 693 693 { 694 694 return bch2_trans_do(c, disk_res, NULL, flags, 695 - bch2_btree_insert_trans(&trans, id, k, 0)); 695 + bch2_btree_insert_trans(trans, id, k, 0)); 696 696 } 697 697 698 698 int bch2_btree_delete_extent_at(struct btree_trans *trans, struct btree_iter *iter, ··· 824 824 u64 *journal_seq) 825 825 { 826 826 int ret = bch2_trans_run(c, 827 - bch2_btree_delete_range_trans(&trans, id, start, end, 827 + bch2_btree_delete_range_trans(trans, id, start, end, 828 828 update_flags, journal_seq)); 829 829 if (ret == -BCH_ERR_transaction_restart_nested) 830 830 ret = 0; ··· 898 898 } else { 899 899 ret = bch2_trans_do(c, NULL, NULL, 900 900 BTREE_INSERT_LAZY_RW|commit_flags, 901 - __bch2_trans_log_msg(&trans.extra_journal_entries, fmt, args)); 901 + __bch2_trans_log_msg(&trans->extra_journal_entries, fmt, args)); 902 902 } 903 903 904 904 return ret;
+6 -19
fs/bcachefs/btree_update.h
··· 146 146 nested_lockrestart_do(_trans, _do ?: bch2_trans_commit(_trans, (_disk_res),\ 147 147 (_journal_seq), (_flags))) 148 148 149 - #define bch2_trans_do(_c, _disk_res, _journal_seq, _flags, _do) \ 149 + #define bch2_trans_run(_c, _do) \ 150 150 ({ \ 151 - struct btree_trans trans; \ 152 - int _ret; \ 153 - \ 154 - bch2_trans_init(&trans, (_c), 0, 0); \ 155 - _ret = commit_do(&trans, _disk_res, _journal_seq, _flags, _do); \ 156 - bch2_trans_exit(&trans); \ 157 - \ 151 + struct btree_trans *trans = bch2_trans_get(_c); \ 152 + int _ret = (_do); \ 153 + bch2_trans_put(trans); \ 158 154 _ret; \ 159 155 }) 160 156 161 - #define bch2_trans_run(_c, _do) \ 162 - ({ \ 163 - struct btree_trans trans; \ 164 - int _ret; \ 165 - \ 166 - bch2_trans_init(&trans, (_c), 0, 0); \ 167 - _ret = (_do); \ 168 - bch2_trans_exit(&trans); \ 169 - \ 170 - _ret; \ 171 - }) 157 + #define bch2_trans_do(_c, _disk_res, _journal_seq, _flags, _do) \ 158 + bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do)) 172 159 173 160 #define trans_for_each_update(_trans, _i) \ 174 161 for ((_i) = (_trans)->updates; \
+17 -18
fs/bcachefs/btree_update_interior.c
··· 597 597 { 598 598 struct bch_fs *c = as->c; 599 599 struct btree *b; 600 - struct btree_trans trans; 600 + struct btree_trans *trans = bch2_trans_get(c); 601 601 u64 journal_seq = 0; 602 602 unsigned i; 603 603 int ret; 604 604 605 - bch2_trans_init(&trans, c, 0, 512); 606 605 /* 607 606 * If we're already in an error state, it might be because a btree node 608 607 * was never written, and we might be trying to free that same btree ··· 622 623 623 624 b = as->old_nodes[i]; 624 625 625 - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); 626 + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); 626 627 seq = b->data ? b->data->keys.seq : 0; 627 628 six_unlock_read(&b->c.lock); 628 629 ··· 644 645 * journal reclaim does btree updates when flushing bkey_cached entries, 645 646 * which may require allocations as well. 646 647 */ 647 - ret = commit_do(&trans, &as->disk_res, &journal_seq, 648 + ret = commit_do(trans, &as->disk_res, &journal_seq, 648 649 BCH_WATERMARK_reclaim| 649 650 BTREE_INSERT_NOFAIL| 650 651 BTREE_INSERT_NOCHECK_RW| 651 652 BTREE_INSERT_JOURNAL_RECLAIM, 652 - btree_update_nodes_written_trans(&trans, as)); 653 - bch2_trans_unlock(&trans); 653 + btree_update_nodes_written_trans(trans, as)); 654 + bch2_trans_unlock(trans); 654 655 655 656 bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c, 656 657 "%s(): error %s", __func__, bch2_err_str(ret)); ··· 659 660 struct btree_path *path; 660 661 661 662 b = as->b; 662 - path = get_unlocked_mut_path(&trans, as->btree_id, b->c.level, b->key.k.p); 663 + path = get_unlocked_mut_path(trans, as->btree_id, b->c.level, b->key.k.p); 663 664 /* 664 665 * @b is the node we did the final insert into: 665 666 * ··· 682 683 * we may rarely end up with a locked path besides the one we 683 684 * have here: 684 685 */ 685 - bch2_trans_unlock(&trans); 686 - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_intent); 687 - mark_btree_node_locked(&trans, path, b->c.level, 
BTREE_NODE_INTENT_LOCKED); 686 + bch2_trans_unlock(trans); 687 + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); 688 + mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED); 688 689 path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); 689 690 path->l[b->c.level].b = b; 690 691 691 - bch2_btree_node_lock_write_nofail(&trans, path, &b->c); 692 + bch2_btree_node_lock_write_nofail(trans, path, &b->c); 692 693 693 694 mutex_lock(&c->btree_interior_update_lock); 694 695 ··· 728 729 six_unlock_write(&b->c.lock); 729 730 730 731 btree_node_write_if_need(c, b, SIX_LOCK_intent); 731 - btree_node_unlock(&trans, path, b->c.level); 732 - bch2_path_put(&trans, path, true); 732 + btree_node_unlock(trans, path, b->c.level); 733 + bch2_path_put(trans, path, true); 733 734 } 734 735 735 736 bch2_journal_pin_drop(&c->journal, &as->journal); ··· 749 750 for (i = 0; i < as->nr_new_nodes; i++) { 750 751 b = as->new_nodes[i]; 751 752 752 - btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read); 753 + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read); 753 754 btree_node_write_if_need(c, b, SIX_LOCK_read); 754 755 six_unlock_read(&b->c.lock); 755 756 } ··· 757 758 for (i = 0; i < as->nr_open_buckets; i++) 758 759 bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]); 759 760 760 - bch2_btree_update_free(as, &trans); 761 - bch2_trans_exit(&trans); 761 + bch2_btree_update_free(as, trans); 762 + bch2_trans_put(trans); 762 763 } 763 764 764 765 static void btree_interior_update_work(struct work_struct *work) ··· 2048 2049 int ret; 2049 2050 2050 2051 ret = bch2_trans_do(c, NULL, NULL, 0, 2051 - async_btree_node_rewrite_trans(&trans, a)); 2052 + async_btree_node_rewrite_trans(trans, a)); 2052 2053 if (ret) 2053 2054 bch_err_fn(c, ret); 2054 2055 bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite); ··· 2364 2365 2365 2366 void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) 2366 2367 { 2367 - bch2_trans_run(c, 
__bch2_btree_root_alloc(&trans, id)); 2368 + bch2_trans_run(c, __bch2_btree_root_alloc(trans, id)); 2368 2369 } 2369 2370 2370 2371 void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
+1 -1
fs/bcachefs/btree_write_buffer.c
··· 296 296 mutex_lock(&wb->flush_lock); 297 297 298 298 return bch2_trans_run(c, 299 - __bch2_btree_write_buffer_flush(&trans, BTREE_INSERT_NOCHECK_RW, true)); 299 + __bch2_btree_write_buffer_flush(trans, BTREE_INSERT_NOCHECK_RW, true)); 300 300 } 301 301 302 302 static inline u64 btree_write_buffer_ref(int idx)
+1 -1
fs/bcachefs/buckets.c
··· 1923 1923 1924 1924 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca) 1925 1925 { 1926 - int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca)); 1926 + int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca)); 1927 1927 1928 1928 if (ret) 1929 1929 bch_err_fn(c, ret);
+1 -1
fs/bcachefs/data_update.c
··· 303 303 304 304 int bch2_data_update_index_update(struct bch_write_op *op) 305 305 { 306 - return bch2_trans_run(op->c, __bch2_data_update_index_update(&trans, op)); 306 + return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op)); 307 307 } 308 308 309 309 void bch2_data_update_read_done(struct data_update *m,
+17 -17
fs/bcachefs/debug.c
··· 366 366 size_t size, loff_t *ppos) 367 367 { 368 368 struct dump_iter *i = file->private_data; 369 - struct btree_trans trans; 369 + struct btree_trans *trans; 370 370 struct btree_iter iter; 371 371 struct bkey_s_c k; 372 372 ssize_t ret; ··· 379 379 if (ret) 380 380 return ret; 381 381 382 - bch2_trans_init(&trans, i->c, 0, 0); 383 - ret = for_each_btree_key2(&trans, iter, i->id, i->from, 382 + trans = bch2_trans_get(i->c); 383 + ret = for_each_btree_key2(trans, iter, i->id, i->from, 384 384 BTREE_ITER_PREFETCH| 385 385 BTREE_ITER_ALL_SNAPSHOTS, k, ({ 386 386 bch2_bkey_val_to_text(&i->buf, i->c, k); 387 387 prt_newline(&i->buf); 388 - drop_locks_do(&trans, flush_buf(i)); 388 + drop_locks_do(trans, flush_buf(i)); 389 389 })); 390 390 i->from = iter.pos; 391 391 392 - bch2_trans_exit(&trans); 392 + bch2_trans_put(trans); 393 393 394 394 if (!ret) 395 395 ret = flush_buf(i); ··· 408 408 size_t size, loff_t *ppos) 409 409 { 410 410 struct dump_iter *i = file->private_data; 411 - struct btree_trans trans; 411 + struct btree_trans *trans; 412 412 struct btree_iter iter; 413 413 struct btree *b; 414 414 ssize_t ret; ··· 424 424 if (bpos_eq(SPOS_MAX, i->from)) 425 425 return i->ret; 426 426 427 - bch2_trans_init(&trans, i->c, 0, 0); 427 + trans = bch2_trans_get(i->c); 428 428 retry: 429 - bch2_trans_begin(&trans); 429 + bch2_trans_begin(trans); 430 430 431 - for_each_btree_node(&trans, iter, i->id, i->from, 0, b, ret) { 431 + for_each_btree_node(trans, iter, i->id, i->from, 0, b, ret) { 432 432 bch2_btree_node_to_text(&i->buf, i->c, b); 433 433 i->from = !bpos_eq(SPOS_MAX, b->key.k.p) 434 434 ? 
bpos_successor(b->key.k.p) 435 435 : b->key.k.p; 436 436 437 - ret = drop_locks_do(&trans, flush_buf(i)); 437 + ret = drop_locks_do(trans, flush_buf(i)); 438 438 if (ret) 439 439 break; 440 440 } 441 - bch2_trans_iter_exit(&trans, &iter); 441 + bch2_trans_iter_exit(trans, &iter); 442 442 443 443 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 444 444 goto retry; 445 445 446 - bch2_trans_exit(&trans); 446 + bch2_trans_put(trans); 447 447 448 448 if (!ret) 449 449 ret = flush_buf(i); ··· 462 462 size_t size, loff_t *ppos) 463 463 { 464 464 struct dump_iter *i = file->private_data; 465 - struct btree_trans trans; 465 + struct btree_trans *trans; 466 466 struct btree_iter iter; 467 467 struct bkey_s_c k; 468 468 ssize_t ret; ··· 475 475 if (ret) 476 476 return ret; 477 477 478 - bch2_trans_init(&trans, i->c, 0, 0); 478 + trans = bch2_trans_get(i->c); 479 479 480 - ret = for_each_btree_key2(&trans, iter, i->id, i->from, 480 + ret = for_each_btree_key2(trans, iter, i->id, i->from, 481 481 BTREE_ITER_PREFETCH| 482 482 BTREE_ITER_ALL_SNAPSHOTS, k, ({ 483 483 struct btree_path_level *l = &iter.path->l[0]; ··· 490 490 } 491 491 492 492 bch2_bfloat_to_text(&i->buf, l->b, _k); 493 - drop_locks_do(&trans, flush_buf(i)); 493 + drop_locks_do(trans, flush_buf(i)); 494 494 })); 495 495 i->from = iter.pos; 496 496 497 - bch2_trans_exit(&trans); 497 + bch2_trans_put(trans); 498 498 499 499 if (!ret) 500 500 ret = flush_buf(i);
+14 -17
fs/bcachefs/dirent.c
··· 479 479 const struct bch_hash_info *hash_info, 480 480 const struct qstr *name, subvol_inum *inum) 481 481 { 482 - struct btree_trans trans; 482 + struct btree_trans *trans = bch2_trans_get(c); 483 483 struct btree_iter iter; 484 484 int ret; 485 - 486 - bch2_trans_init(&trans, c, 0, 0); 487 485 retry: 488 - bch2_trans_begin(&trans); 486 + bch2_trans_begin(trans); 489 487 490 - ret = __bch2_dirent_lookup_trans(&trans, &iter, dir, hash_info, 488 + ret = __bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, 491 489 name, inum, 0); 492 490 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 493 491 goto retry; 494 492 if (!ret) 495 - bch2_trans_iter_exit(&trans, &iter); 496 - bch2_trans_exit(&trans); 493 + bch2_trans_iter_exit(trans, &iter); 494 + bch2_trans_put(trans); 497 495 return ret; 498 496 } 499 497 ··· 520 522 521 523 int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx) 522 524 { 523 - struct btree_trans trans; 525 + struct btree_trans *trans = bch2_trans_get(c); 524 526 struct btree_iter iter; 525 527 struct bkey_s_c k; 526 528 struct bkey_s_c_dirent dirent; ··· 531 533 int ret; 532 534 533 535 bch2_bkey_buf_init(&sk); 534 - bch2_trans_init(&trans, c, 0, 0); 535 536 retry: 536 - bch2_trans_begin(&trans); 537 + bch2_trans_begin(trans); 537 538 538 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 539 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 539 540 if (ret) 540 541 goto err; 541 542 542 - for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_dirents, 543 + for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_dirents, 543 544 SPOS(inum.inum, ctx->pos, snapshot), 544 545 POS(inum.inum, U64_MAX), 0, k, ret) { 545 546 if (k.k->type != KEY_TYPE_dirent) ··· 546 549 547 550 dirent = bkey_s_c_to_dirent(k); 548 551 549 - ret = bch2_dirent_read_target(&trans, inum, dirent, &target); 552 + ret = bch2_dirent_read_target(trans, inum, dirent, &target); 550 553 if (ret < 0) 551 554 
break; 552 555 if (ret) ··· 555 558 /* dir_emit() can fault and block: */ 556 559 bch2_bkey_buf_reassemble(&sk, c, k); 557 560 dirent = bkey_i_to_s_c_dirent(sk.k); 558 - bch2_trans_unlock(&trans); 561 + bch2_trans_unlock(trans); 559 562 560 563 name = bch2_dirent_get_name(dirent); 561 564 ··· 571 574 * read_target looks up subvolumes, we can overflow paths if the 572 575 * directory has many subvolumes in it 573 576 */ 574 - ret = btree_trans_too_many_iters(&trans); 577 + ret = btree_trans_too_many_iters(trans); 575 578 if (ret) 576 579 break; 577 580 } 578 - bch2_trans_iter_exit(&trans, &iter); 581 + bch2_trans_iter_exit(trans, &iter); 579 582 err: 580 583 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 581 584 goto retry; 582 585 583 - bch2_trans_exit(&trans); 586 + bch2_trans_put(trans); 584 587 bch2_bkey_buf_exit(&sk, c); 585 588 586 589 return ret;
+14 -20
fs/bcachefs/ec.c
··· 476 476 477 477 static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe) 478 478 { 479 - return bch2_trans_run(c, get_stripe_key_trans(&trans, idx, stripe)); 479 + return bch2_trans_run(c, get_stripe_key_trans(trans, idx, stripe)); 480 480 } 481 481 482 482 /* recovery read path: */ ··· 788 788 { 789 789 struct bch_fs *c = 790 790 container_of(work, struct bch_fs, ec_stripe_delete_work); 791 - struct btree_trans trans; 791 + struct btree_trans *trans = bch2_trans_get(c); 792 792 int ret; 793 793 u64 idx; 794 - 795 - bch2_trans_init(&trans, c, 0, 0); 796 794 797 795 while (1) { 798 796 mutex_lock(&c->ec_stripes_heap_lock); ··· 800 802 if (!idx) 801 803 break; 802 804 803 - ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL, 804 - ec_stripe_delete(&trans, idx)); 805 + ret = commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL, 806 + ec_stripe_delete(trans, idx)); 805 807 if (ret) { 806 808 bch_err_fn(c, ret); 807 809 break; 808 810 } 809 811 } 810 812 811 - bch2_trans_exit(&trans); 813 + bch2_trans_put(trans); 812 814 813 815 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete); 814 816 } ··· 997 999 998 1000 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s) 999 1001 { 1000 - struct btree_trans trans; 1002 + struct btree_trans *trans = bch2_trans_get(c); 1001 1003 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v; 1002 1004 unsigned i, nr_data = v->nr_blocks - v->nr_redundant; 1003 1005 int ret = 0; 1004 1006 1005 - bch2_trans_init(&trans, c, 0, 0); 1006 - 1007 - ret = bch2_btree_write_buffer_flush(&trans); 1007 + ret = bch2_btree_write_buffer_flush(trans); 1008 1008 if (ret) 1009 1009 goto err; 1010 1010 1011 1011 for (i = 0; i < nr_data; i++) { 1012 - ret = ec_stripe_update_bucket(&trans, s, i); 1012 + ret = ec_stripe_update_bucket(trans, s, i); 1013 1013 if (ret) 1014 1014 break; 1015 1015 } 1016 1016 err: 1017 - bch2_trans_exit(&trans); 1017 + bch2_trans_put(trans); 1018 1018 1019 1019 return ret; 1020 
1020 } ··· 1120 1124 ret = bch2_trans_do(c, &s->res, NULL, 1121 1125 BTREE_INSERT_NOCHECK_RW| 1122 1126 BTREE_INSERT_NOFAIL, 1123 - ec_stripe_key_update(&trans, 1127 + ec_stripe_key_update(trans, 1124 1128 bkey_i_to_stripe(&s->new_stripe.key), 1125 1129 !s->have_existing_stripe)); 1126 1130 if (ret) { ··· 1818 1822 1819 1823 int bch2_stripes_read(struct bch_fs *c) 1820 1824 { 1821 - struct btree_trans trans; 1825 + struct btree_trans *trans = bch2_trans_get(c); 1822 1826 struct btree_iter iter; 1823 1827 struct bkey_s_c k; 1824 1828 const struct bch_stripe *s; ··· 1826 1830 unsigned i; 1827 1831 int ret; 1828 1832 1829 - bch2_trans_init(&trans, c, 0, 0); 1830 - 1831 - for_each_btree_key(&trans, iter, BTREE_ID_stripes, POS_MIN, 1833 + for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN, 1832 1834 BTREE_ITER_PREFETCH, k, ret) { 1833 1835 if (k.k->type != KEY_TYPE_stripe) 1834 1836 continue; ··· 1849 1855 1850 1856 bch2_stripes_heap_insert(c, m, k.k->p.offset); 1851 1857 } 1852 - bch2_trans_iter_exit(&trans, &iter); 1858 + bch2_trans_iter_exit(trans, &iter); 1853 1859 1854 - bch2_trans_exit(&trans); 1860 + bch2_trans_put(trans); 1855 1861 1856 1862 if (ret) 1857 1863 bch_err_fn(c, ret);
+5 -11
fs/bcachefs/fs-io-buffered.c
··· 270 270 struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host); 271 271 struct bch_fs *c = inode->v.i_sb->s_fs_info; 272 272 struct bch_io_opts opts; 273 - struct btree_trans trans; 273 + struct btree_trans *trans = bch2_trans_get(c); 274 274 struct folio *folio; 275 275 struct readpages_iter readpages_iter; 276 276 int ret; ··· 279 279 280 280 ret = readpages_iter_init(&readpages_iter, ractl); 281 281 BUG_ON(ret); 282 - 283 - bch2_trans_init(&trans, c, 0, 0); 284 282 285 283 bch2_pagecache_add_get(inode); 286 284 ··· 298 300 rbio->bio.bi_end_io = bch2_readpages_end_io; 299 301 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0)); 300 302 301 - bchfs_read(&trans, rbio, inode_inum(inode), 303 + bchfs_read(trans, rbio, inode_inum(inode), 302 304 &readpages_iter); 303 - bch2_trans_unlock(&trans); 305 + bch2_trans_unlock(trans); 304 306 } 305 307 306 308 bch2_pagecache_add_put(inode); 307 309 308 - bch2_trans_exit(&trans); 310 + bch2_trans_put(trans); 309 311 darray_exit(&readpages_iter.folios); 310 312 } 311 313 312 314 static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio, 313 315 subvol_inum inum, struct folio *folio) 314 316 { 315 - struct btree_trans trans; 316 - 317 317 bch2_folio_create(folio, __GFP_NOFAIL); 318 318 319 319 rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC; 320 320 rbio->bio.bi_iter.bi_sector = folio_sector(folio); 321 321 BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0)); 322 322 323 - bch2_trans_init(&trans, c, 0, 0); 324 - bchfs_read(&trans, rbio, inum, NULL); 325 - bch2_trans_exit(&trans); 323 + bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0)); 326 324 } 327 325 328 326 static void bch2_read_single_folio_end_io(struct bio *bio)
+6 -8
fs/bcachefs/fs-io-direct.c
··· 234 234 u64 offset, u64 size, 235 235 unsigned nr_replicas, bool compressed) 236 236 { 237 - struct btree_trans trans; 237 + struct btree_trans *trans = bch2_trans_get(c); 238 238 struct btree_iter iter; 239 239 struct bkey_s_c k; 240 240 u64 end = offset + size; 241 241 u32 snapshot; 242 242 bool ret = true; 243 243 int err; 244 - 245 - bch2_trans_init(&trans, c, 0, 0); 246 244 retry: 247 - bch2_trans_begin(&trans); 245 + bch2_trans_begin(trans); 248 246 249 - err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 247 + err = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 250 248 if (err) 251 249 goto err; 252 250 253 - for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, 251 + for_each_btree_key_norestart(trans, iter, BTREE_ID_extents, 254 252 SPOS(inum.inum, offset, snapshot), 255 253 BTREE_ITER_SLOTS, k, err) { 256 254 if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end))) ··· 263 265 } 264 266 265 267 offset = iter.pos.offset; 266 - bch2_trans_iter_exit(&trans, &iter); 268 + bch2_trans_iter_exit(trans, &iter); 267 269 err: 268 270 if (bch2_err_matches(err, BCH_ERR_transaction_restart)) 269 271 goto retry; 270 - bch2_trans_exit(&trans); 272 + bch2_trans_put(trans); 271 273 272 274 return err ? false : ret; 273 275 }
+7 -7
fs/bcachefs/fs-io-pagecache.c
··· 182 182 int bch2_folio_set(struct bch_fs *c, subvol_inum inum, 183 183 struct folio **fs, unsigned nr_folios) 184 184 { 185 - struct btree_trans trans; 185 + struct btree_trans *trans; 186 186 struct btree_iter iter; 187 187 struct bkey_s_c k; 188 188 struct bch_folio *s; ··· 204 204 return 0; 205 205 206 206 folio_idx = 0; 207 - bch2_trans_init(&trans, c, 0, 0); 207 + trans = bch2_trans_get(c); 208 208 retry: 209 - bch2_trans_begin(&trans); 209 + bch2_trans_begin(trans); 210 210 211 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 211 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 212 212 if (ret) 213 213 goto err; 214 214 215 - for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, 215 + for_each_btree_key_norestart(trans, iter, BTREE_ID_extents, 216 216 SPOS(inum.inum, offset, snapshot), 217 217 BTREE_ITER_SLOTS, k, ret) { 218 218 unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k); ··· 243 243 } 244 244 245 245 offset = iter.pos.offset; 246 - bch2_trans_iter_exit(&trans, &iter); 246 + bch2_trans_iter_exit(trans, &iter); 247 247 err: 248 248 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 249 249 goto retry; 250 - bch2_trans_exit(&trans); 250 + bch2_trans_put(trans); 251 251 252 252 return ret; 253 253 }
+38 -46
fs/bcachefs/fs-io.c
··· 207 207 struct bpos start, 208 208 struct bpos end) 209 209 { 210 - struct btree_trans trans; 210 + struct btree_trans *trans = bch2_trans_get(c); 211 211 struct btree_iter iter; 212 212 struct bkey_s_c k; 213 213 int ret = 0; 214 - 215 - bch2_trans_init(&trans, c, 0, 0); 216 214 retry: 217 - bch2_trans_begin(&trans); 215 + bch2_trans_begin(trans); 218 216 219 - ret = bch2_subvolume_get_snapshot(&trans, subvol, &start.snapshot); 217 + ret = bch2_subvolume_get_snapshot(trans, subvol, &start.snapshot); 220 218 if (ret) 221 219 goto err; 222 220 223 - for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret) 221 + for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, start, end, 0, k, ret) 224 222 if (bkey_extent_is_data(k.k) && !bkey_extent_is_unwritten(k)) { 225 223 ret = 1; 226 224 break; 227 225 } 228 226 start = iter.pos; 229 - bch2_trans_iter_exit(&trans, &iter); 227 + bch2_trans_iter_exit(trans, &iter); 230 228 err: 231 229 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 232 230 goto retry; 233 231 234 - bch2_trans_exit(&trans); 232 + bch2_trans_put(trans); 235 233 return ret; 236 234 } 237 235 ··· 580 582 u64 start_sector, u64 end_sector) 581 583 { 582 584 struct bch_fs *c = inode->v.i_sb->s_fs_info; 583 - struct btree_trans trans; 585 + struct btree_trans *trans = bch2_trans_get(c); 584 586 struct btree_iter iter; 585 587 struct bpos end_pos = POS(inode->v.i_ino, end_sector); 586 588 struct bch_io_opts opts; 587 589 int ret = 0; 588 590 589 591 bch2_inode_opts_get(&opts, c, &inode->ei_inode); 590 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512); 591 592 592 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 593 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 593 594 POS(inode->v.i_ino, start_sector), 594 595 BTREE_ITER_SLOTS|BTREE_ITER_INTENT); 595 596 ··· 601 604 u64 hole_start, hole_end; 602 605 u32 snapshot; 603 606 604 - bch2_trans_begin(&trans); 607 + bch2_trans_begin(trans); 605 608 606 
- ret = bch2_subvolume_get_snapshot(&trans, 609 + ret = bch2_subvolume_get_snapshot(trans, 607 610 inode->ei_subvol, &snapshot); 608 611 if (ret) 609 612 goto bkey_err; ··· 640 643 &hole_start, 641 644 &hole_end, 642 645 opts.data_replicas, true)) 643 - ret = drop_locks_do(&trans, 646 + ret = drop_locks_do(trans, 644 647 (bch2_clamp_data_hole(&inode->v, 645 648 &hole_start, 646 649 &hole_end, ··· 663 666 goto bkey_err; 664 667 } 665 668 666 - ret = bch2_extent_fallocate(&trans, inode_inum(inode), &iter, 669 + ret = bch2_extent_fallocate(trans, inode_inum(inode), &iter, 667 670 sectors, opts, &i_sectors_delta, 668 671 writepoint_hashed((unsigned long) current)); 669 672 if (ret) ··· 671 674 672 675 bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta); 673 676 674 - drop_locks_do(&trans, 677 + drop_locks_do(trans, 675 678 (bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0)); 676 679 bkey_err: 677 680 bch2_quota_reservation_put(c, inode, &quota_res); ··· 683 686 struct quota_res quota_res = { 0 }; 684 687 s64 i_sectors_delta = 0; 685 688 686 - bch2_fpunch_at(&trans, &iter, inode_inum(inode), 689 + bch2_fpunch_at(trans, &iter, inode_inum(inode), 687 690 end_sector, &i_sectors_delta); 688 691 bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta); 689 692 bch2_quota_reservation_put(c, inode, &quota_res); 690 693 } 691 694 692 - bch2_trans_iter_exit(&trans, &iter); 693 - bch2_trans_exit(&trans); 695 + bch2_trans_iter_exit(trans, &iter); 696 + bch2_trans_put(trans); 694 697 return ret; 695 698 } 696 699 ··· 796 799 u64 start, u64 end) 797 800 { 798 801 struct bch_fs *c = inode->v.i_sb->s_fs_info; 799 - struct btree_trans trans; 802 + struct btree_trans *trans = bch2_trans_get(c); 800 803 struct btree_iter iter; 801 804 struct bkey_s_c k; 802 805 u32 snapshot; 803 806 u64 sectors = end - start; 804 807 u64 pos = start; 805 808 int ret; 806 - 807 - bch2_trans_init(&trans, c, 0, 0); 808 809 retry: 809 - bch2_trans_begin(&trans); 810 + 
bch2_trans_begin(trans); 810 811 811 - ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot); 812 + ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot); 812 813 if (ret) 813 814 goto err; 814 815 815 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 816 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 816 817 SPOS(inode->v.i_ino, pos, snapshot), 0); 817 818 818 - while (!(ret = btree_trans_too_many_iters(&trans)) && 819 + while (!(ret = btree_trans_too_many_iters(trans)) && 819 820 (k = bch2_btree_iter_peek_upto(&iter, POS(inode->v.i_ino, end - 1))).k && 820 821 !(ret = bkey_err(k))) { 821 822 if (bkey_extent_is_allocation(k.k)) { ··· 825 830 bch2_btree_iter_advance(&iter); 826 831 } 827 832 pos = iter.pos.offset; 828 - bch2_trans_iter_exit(&trans, &iter); 833 + bch2_trans_iter_exit(trans, &iter); 829 834 err: 830 835 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 831 836 goto retry; 832 837 833 - bch2_trans_exit(&trans); 838 + bch2_trans_put(trans); 834 839 835 - if (ret) 836 - return ret; 837 - 838 - return bch2_quota_reservation_add(c, inode, res, sectors, true); 840 + return ret ?: bch2_quota_reservation_add(c, inode, res, sectors, true); 839 841 } 840 842 841 843 loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, ··· 925 933 { 926 934 struct bch_inode_info *inode = file_bch_inode(file); 927 935 struct bch_fs *c = inode->v.i_sb->s_fs_info; 928 - struct btree_trans trans; 936 + struct btree_trans *trans; 929 937 struct btree_iter iter; 930 938 struct bkey_s_c k; 931 939 subvol_inum inum = inode_inum(inode); ··· 937 945 if (offset >= isize) 938 946 return -ENXIO; 939 947 940 - bch2_trans_init(&trans, c, 0, 0); 948 + trans = bch2_trans_get(c); 941 949 retry: 942 - bch2_trans_begin(&trans); 950 + bch2_trans_begin(trans); 943 951 944 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 952 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 945 953 if (ret) 946 
954 goto err; 947 955 948 - for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, 956 + for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_extents, 949 957 SPOS(inode->v.i_ino, offset >> 9, snapshot), 950 958 POS(inode->v.i_ino, U64_MAX), 951 959 0, k, ret) { ··· 955 963 } else if (k.k->p.offset >> 9 > isize) 956 964 break; 957 965 } 958 - bch2_trans_iter_exit(&trans, &iter); 966 + bch2_trans_iter_exit(trans, &iter); 959 967 err: 960 968 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 961 969 goto retry; 962 970 963 - bch2_trans_exit(&trans); 971 + bch2_trans_put(trans); 964 972 if (ret) 965 973 return ret; 966 974 ··· 978 986 { 979 987 struct bch_inode_info *inode = file_bch_inode(file); 980 988 struct bch_fs *c = inode->v.i_sb->s_fs_info; 981 - struct btree_trans trans; 989 + struct btree_trans *trans; 982 990 struct btree_iter iter; 983 991 struct bkey_s_c k; 984 992 subvol_inum inum = inode_inum(inode); ··· 990 998 if (offset >= isize) 991 999 return -ENXIO; 992 1000 993 - bch2_trans_init(&trans, c, 0, 0); 1001 + trans = bch2_trans_get(c); 994 1002 retry: 995 - bch2_trans_begin(&trans); 1003 + bch2_trans_begin(trans); 996 1004 997 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 1005 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 998 1006 if (ret) 999 1007 goto err; 1000 1008 1001 - for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, 1009 + for_each_btree_key_norestart(trans, iter, BTREE_ID_extents, 1002 1010 SPOS(inode->v.i_ino, offset >> 9, snapshot), 1003 1011 BTREE_ITER_SLOTS, k, ret) { 1004 1012 if (k.k->p.inode != inode->v.i_ino) { ··· 1016 1024 offset = max(offset, bkey_start_offset(k.k) << 9); 1017 1025 } 1018 1026 } 1019 - bch2_trans_iter_exit(&trans, &iter); 1027 + bch2_trans_iter_exit(trans, &iter); 1020 1028 err: 1021 1029 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 1022 1030 goto retry; 1023 1031 1024 - bch2_trans_exit(&trans); 1032 + bch2_trans_put(trans); 1025 
1033 if (ret) 1026 1034 return ret; 1027 1035
+80 -86
fs/bcachefs/fs.c
··· 82 82 inode_set_fn set, 83 83 void *p, unsigned fields) 84 84 { 85 - struct btree_trans trans; 85 + struct btree_trans *trans = bch2_trans_get(c); 86 86 struct btree_iter iter = { NULL }; 87 87 struct bch_inode_unpacked inode_u; 88 88 int ret; 89 - 90 - bch2_trans_init(&trans, c, 0, 512); 91 89 retry: 92 - bch2_trans_begin(&trans); 90 + bch2_trans_begin(trans); 93 91 94 - ret = bch2_inode_peek(&trans, &iter, &inode_u, inode_inum(inode), 92 + ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode), 95 93 BTREE_ITER_INTENT) ?: 96 - (set ? set(&trans, inode, &inode_u, p) : 0) ?: 97 - bch2_inode_write(&trans, &iter, &inode_u) ?: 98 - bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL); 94 + (set ? set(trans, inode, &inode_u, p) : 0) ?: 95 + bch2_inode_write(trans, &iter, &inode_u) ?: 96 + bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL); 99 97 100 98 /* 101 99 * the btree node lock protects inode->ei_inode, not ei_update_lock; 102 100 * this is important for inode updates via bchfs_write_index_update 103 101 */ 104 102 if (!ret) 105 - bch2_inode_update_after_write(&trans, inode, &inode_u, fields); 103 + bch2_inode_update_after_write(trans, inode, &inode_u, fields); 106 104 107 - bch2_trans_iter_exit(&trans, &iter); 105 + bch2_trans_iter_exit(trans, &iter); 108 106 109 107 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 110 108 goto retry; ··· 112 114 inode_inum(inode).subvol, 113 115 inode_inum(inode).inum); 114 116 115 - bch2_trans_exit(&trans); 117 + bch2_trans_put(trans); 116 118 return ret < 0 ? 
ret : 0; 117 119 } 118 120 ··· 180 182 { 181 183 struct bch_inode_unpacked inode_u; 182 184 struct bch_inode_info *inode; 183 - struct btree_trans trans; 185 + struct btree_trans *trans; 184 186 struct bch_subvolume subvol; 185 187 int ret; 186 188 ··· 194 196 if (!(inode->v.i_state & I_NEW)) 195 197 return &inode->v; 196 198 197 - bch2_trans_init(&trans, c, 8, 0); 198 - ret = lockrestart_do(&trans, 199 - bch2_subvolume_get(&trans, inum.subvol, true, 0, &subvol) ?: 200 - bch2_inode_find_by_inum_trans(&trans, inum, &inode_u)); 199 + trans = bch2_trans_get(c); 200 + ret = lockrestart_do(trans, 201 + bch2_subvolume_get(trans, inum.subvol, true, 0, &subvol) ?: 202 + bch2_inode_find_by_inum_trans(trans, inum, &inode_u)); 201 203 202 204 if (!ret) 203 - bch2_vfs_inode_init(&trans, inum, inode, &inode_u, &subvol); 204 - bch2_trans_exit(&trans); 205 + bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol); 206 + bch2_trans_put(trans); 205 207 206 208 if (ret) { 207 209 iget_failed(&inode->v); ··· 224 226 unsigned flags) 225 227 { 226 228 struct bch_fs *c = dir->v.i_sb->s_fs_info; 227 - struct btree_trans trans; 229 + struct btree_trans *trans; 228 230 struct bch_inode_unpacked dir_u; 229 231 struct bch_inode_info *inode, *old; 230 232 struct bch_inode_unpacked inode_u; ··· 254 256 if (!(flags & BCH_CREATE_TMPFILE)) 255 257 mutex_lock(&dir->ei_update_lock); 256 258 257 - bch2_trans_init(&trans, c, 8, 258 - 2048 + (!(flags & BCH_CREATE_TMPFILE) 259 - ? dentry->d_name.len : 0)); 259 + trans = bch2_trans_get(c); 260 260 retry: 261 - bch2_trans_begin(&trans); 261 + bch2_trans_begin(trans); 262 262 263 - ret = bch2_create_trans(&trans, 263 + ret = bch2_create_trans(trans, 264 264 inode_inum(dir), &dir_u, &inode_u, 265 265 !(flags & BCH_CREATE_TMPFILE) 266 266 ? 
&dentry->d_name : NULL, ··· 274 278 inum.subvol = inode_u.bi_subvol ?: dir->ei_subvol; 275 279 inum.inum = inode_u.bi_inum; 276 280 277 - ret = bch2_subvolume_get(&trans, inum.subvol, true, 281 + ret = bch2_subvolume_get(trans, inum.subvol, true, 278 282 BTREE_ITER_WITH_UPDATES, &subvol) ?: 279 - bch2_trans_commit(&trans, NULL, &journal_seq, 0); 283 + bch2_trans_commit(trans, NULL, &journal_seq, 0); 280 284 if (unlikely(ret)) { 281 285 bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, 282 286 KEY_TYPE_QUOTA_WARN); ··· 287 291 } 288 292 289 293 if (!(flags & BCH_CREATE_TMPFILE)) { 290 - bch2_inode_update_after_write(&trans, dir, &dir_u, 294 + bch2_inode_update_after_write(trans, dir, &dir_u, 291 295 ATTR_MTIME|ATTR_CTIME); 292 296 mutex_unlock(&dir->ei_update_lock); 293 297 } 294 298 295 299 bch2_iget5_set(&inode->v, &inum); 296 - bch2_vfs_inode_init(&trans, inum, inode, &inode_u, &subvol); 300 + bch2_vfs_inode_init(trans, inum, inode, &inode_u, &subvol); 297 301 298 302 set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl); 299 303 set_cached_acl(&inode->v, ACL_TYPE_DEFAULT, default_acl); ··· 333 337 unlock_new_inode(&inode->v); 334 338 } 335 339 336 - bch2_trans_exit(&trans); 340 + bch2_trans_put(trans); 337 341 err: 338 342 posix_acl_release(default_acl); 339 343 posix_acl_release(acl); ··· 342 346 if (!(flags & BCH_CREATE_TMPFILE)) 343 347 mutex_unlock(&dir->ei_update_lock); 344 348 345 - bch2_trans_exit(&trans); 349 + bch2_trans_put(trans); 346 350 make_bad_inode(&inode->v); 347 351 iput(&inode->v); 348 352 inode = ERR_PTR(ret); ··· 397 401 struct bch_inode_info *dir, 398 402 struct dentry *dentry) 399 403 { 400 - struct btree_trans trans; 404 + struct btree_trans *trans = bch2_trans_get(c); 401 405 struct bch_inode_unpacked dir_u, inode_u; 402 406 int ret; 403 407 404 408 mutex_lock(&inode->ei_update_lock); 405 - bch2_trans_init(&trans, c, 4, 1024); 406 409 407 - ret = commit_do(&trans, NULL, NULL, 0, 408 - bch2_link_trans(&trans, 410 + ret = commit_do(trans, NULL, 
NULL, 0, 411 + bch2_link_trans(trans, 409 412 inode_inum(dir), &dir_u, 410 413 inode_inum(inode), &inode_u, 411 414 &dentry->d_name)); 412 415 413 416 if (likely(!ret)) { 414 - bch2_inode_update_after_write(&trans, dir, &dir_u, 417 + bch2_inode_update_after_write(trans, dir, &dir_u, 415 418 ATTR_MTIME|ATTR_CTIME); 416 - bch2_inode_update_after_write(&trans, inode, &inode_u, ATTR_CTIME); 419 + bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME); 417 420 } 418 421 419 - bch2_trans_exit(&trans); 422 + bch2_trans_put(trans); 420 423 mutex_unlock(&inode->ei_update_lock); 421 424 return ret; 422 425 } ··· 446 451 struct bch_inode_info *dir = to_bch_ei(vdir); 447 452 struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); 448 453 struct bch_inode_unpacked dir_u, inode_u; 449 - struct btree_trans trans; 454 + struct btree_trans *trans = bch2_trans_get(c); 450 455 int ret; 451 456 452 457 bch2_lock_inodes(INODE_UPDATE_LOCK, dir, inode); 453 - bch2_trans_init(&trans, c, 4, 1024); 454 458 455 - ret = commit_do(&trans, NULL, NULL, 459 + ret = commit_do(trans, NULL, NULL, 456 460 BTREE_INSERT_NOFAIL, 457 - bch2_unlink_trans(&trans, 461 + bch2_unlink_trans(trans, 458 462 inode_inum(dir), &dir_u, 459 463 &inode_u, &dentry->d_name, 460 464 deleting_snapshot)); 461 465 if (unlikely(ret)) 462 466 goto err; 463 467 464 - bch2_inode_update_after_write(&trans, dir, &dir_u, 468 + bch2_inode_update_after_write(trans, dir, &dir_u, 465 469 ATTR_MTIME|ATTR_CTIME); 466 - bch2_inode_update_after_write(&trans, inode, &inode_u, 470 + bch2_inode_update_after_write(trans, inode, &inode_u, 467 471 ATTR_MTIME); 468 472 469 473 if (inode_u.bi_subvol) { ··· 473 479 set_nlink(&inode->v, 0); 474 480 } 475 481 err: 476 - bch2_trans_exit(&trans); 477 482 bch2_unlock_inodes(INODE_UPDATE_LOCK, dir, inode); 483 + bch2_trans_put(trans); 478 484 479 485 return ret; 480 486 } ··· 537 543 struct bch_inode_info *dst_inode = to_bch_ei(dst_dentry->d_inode); 538 544 struct bch_inode_unpacked 
dst_dir_u, src_dir_u; 539 545 struct bch_inode_unpacked src_inode_u, dst_inode_u; 540 - struct btree_trans trans; 546 + struct btree_trans *trans; 541 547 enum bch_rename_mode mode = flags & RENAME_EXCHANGE 542 548 ? BCH_RENAME_EXCHANGE 543 549 : dst_dentry->d_inode ··· 554 560 return ret; 555 561 } 556 562 557 - bch2_trans_init(&trans, c, 8, 2048); 563 + trans = bch2_trans_get(c); 558 564 559 565 bch2_lock_inodes(INODE_UPDATE_LOCK, 560 566 src_dir, ··· 581 587 goto err; 582 588 } 583 589 584 - ret = commit_do(&trans, NULL, NULL, 0, 585 - bch2_rename_trans(&trans, 590 + ret = commit_do(trans, NULL, NULL, 0, 591 + bch2_rename_trans(trans, 586 592 inode_inum(src_dir), &src_dir_u, 587 593 inode_inum(dst_dir), &dst_dir_u, 588 594 &src_inode_u, ··· 597 603 BUG_ON(dst_inode && 598 604 dst_inode->v.i_ino != dst_inode_u.bi_inum); 599 605 600 - bch2_inode_update_after_write(&trans, src_dir, &src_dir_u, 606 + bch2_inode_update_after_write(trans, src_dir, &src_dir_u, 601 607 ATTR_MTIME|ATTR_CTIME); 602 608 603 609 if (src_dir != dst_dir) 604 - bch2_inode_update_after_write(&trans, dst_dir, &dst_dir_u, 610 + bch2_inode_update_after_write(trans, dst_dir, &dst_dir_u, 605 611 ATTR_MTIME|ATTR_CTIME); 606 612 607 - bch2_inode_update_after_write(&trans, src_inode, &src_inode_u, 613 + bch2_inode_update_after_write(trans, src_inode, &src_inode_u, 608 614 ATTR_CTIME); 609 615 610 616 if (dst_inode) 611 - bch2_inode_update_after_write(&trans, dst_inode, &dst_inode_u, 617 + bch2_inode_update_after_write(trans, dst_inode, &dst_inode_u, 612 618 ATTR_CTIME); 613 619 err: 614 - bch2_trans_exit(&trans); 620 + bch2_trans_put(trans); 615 621 616 622 bch2_fs_quota_transfer(c, src_inode, 617 623 bch_qid(&src_inode->ei_inode), ··· 674 680 { 675 681 struct bch_fs *c = inode->v.i_sb->s_fs_info; 676 682 struct bch_qid qid; 677 - struct btree_trans trans; 683 + struct btree_trans *trans; 678 684 struct btree_iter inode_iter = { NULL }; 679 685 struct bch_inode_unpacked inode_u; 680 686 struct 
posix_acl *acl = NULL; ··· 695 701 if (ret) 696 702 goto err; 697 703 698 - bch2_trans_init(&trans, c, 0, 0); 704 + trans = bch2_trans_get(c); 699 705 retry: 700 - bch2_trans_begin(&trans); 706 + bch2_trans_begin(trans); 701 707 kfree(acl); 702 708 acl = NULL; 703 709 704 - ret = bch2_inode_peek(&trans, &inode_iter, &inode_u, inode_inum(inode), 710 + ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode), 705 711 BTREE_ITER_INTENT); 706 712 if (ret) 707 713 goto btree_err; ··· 709 715 bch2_setattr_copy(idmap, inode, &inode_u, attr); 710 716 711 717 if (attr->ia_valid & ATTR_MODE) { 712 - ret = bch2_acl_chmod(&trans, inode_inum(inode), &inode_u, 718 + ret = bch2_acl_chmod(trans, inode_inum(inode), &inode_u, 713 719 inode_u.bi_mode, &acl); 714 720 if (ret) 715 721 goto btree_err; 716 722 } 717 723 718 - ret = bch2_inode_write(&trans, &inode_iter, &inode_u) ?: 719 - bch2_trans_commit(&trans, NULL, NULL, 724 + ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?: 725 + bch2_trans_commit(trans, NULL, NULL, 720 726 BTREE_INSERT_NOFAIL); 721 727 btree_err: 722 - bch2_trans_iter_exit(&trans, &inode_iter); 728 + bch2_trans_iter_exit(trans, &inode_iter); 723 729 724 730 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 725 731 goto retry; 726 732 if (unlikely(ret)) 727 733 goto err_trans; 728 734 729 - bch2_inode_update_after_write(&trans, inode, &inode_u, attr->ia_valid); 735 + bch2_inode_update_after_write(trans, inode, &inode_u, attr->ia_valid); 730 736 731 737 if (acl) 732 738 set_cached_acl(&inode->v, ACL_TYPE_ACCESS, acl); 733 739 err_trans: 734 - bch2_trans_exit(&trans); 740 + bch2_trans_put(trans); 735 741 err: 736 742 mutex_unlock(&inode->ei_update_lock); 737 743 ··· 873 879 { 874 880 struct bch_fs *c = vinode->i_sb->s_fs_info; 875 881 struct bch_inode_info *ei = to_bch_ei(vinode); 876 - struct btree_trans trans; 882 + struct btree_trans *trans; 877 883 struct btree_iter iter; 878 884 struct bkey_s_c k; 879 885 struct bkey_buf cur, prev; ··· 
894 900 895 901 bch2_bkey_buf_init(&cur); 896 902 bch2_bkey_buf_init(&prev); 897 - bch2_trans_init(&trans, c, 0, 0); 903 + trans = bch2_trans_get(c); 898 904 retry: 899 - bch2_trans_begin(&trans); 905 + bch2_trans_begin(trans); 900 906 901 - ret = bch2_subvolume_get_snapshot(&trans, ei->ei_subvol, &snapshot); 907 + ret = bch2_subvolume_get_snapshot(trans, ei->ei_subvol, &snapshot); 902 908 if (ret) 903 909 goto err; 904 910 905 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 911 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 906 912 SPOS(ei->v.i_ino, start, snapshot), 0); 907 913 908 - while (!(ret = btree_trans_too_many_iters(&trans)) && 914 + while (!(ret = btree_trans_too_many_iters(trans)) && 909 915 (k = bch2_btree_iter_peek_upto(&iter, end)).k && 910 916 !(ret = bkey_err(k))) { 911 917 enum btree_id data_btree = BTREE_ID_extents; ··· 922 928 923 929 bch2_bkey_buf_reassemble(&cur, c, k); 924 930 925 - ret = bch2_read_indirect_extent(&trans, &data_btree, 931 + ret = bch2_read_indirect_extent(trans, &data_btree, 926 932 &offset_into_extent, &cur); 927 933 if (ret) 928 934 break; ··· 941 947 cur.k->k.p.offset += cur.k->k.size; 942 948 943 949 if (have_extent) { 944 - bch2_trans_unlock(&trans); 950 + bch2_trans_unlock(trans); 945 951 ret = bch2_fill_extent(c, info, 946 952 bkey_i_to_s_c(prev.k), 0); 947 953 if (ret) ··· 955 961 POS(iter.pos.inode, iter.pos.offset + sectors)); 956 962 } 957 963 start = iter.pos.offset; 958 - bch2_trans_iter_exit(&trans, &iter); 964 + bch2_trans_iter_exit(trans, &iter); 959 965 err: 960 966 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 961 967 goto retry; 962 968 963 969 if (!ret && have_extent) { 964 - bch2_trans_unlock(&trans); 970 + bch2_trans_unlock(trans); 965 971 ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k), 966 972 FIEMAP_EXTENT_LAST); 967 973 } 968 974 969 - bch2_trans_exit(&trans); 975 + bch2_trans_put(trans); 970 976 bch2_bkey_buf_exit(&cur, c); 971 977 bch2_bkey_buf_exit(&prev, c); 972 
978 return ret < 0 ? ret : 0; ··· 1224 1230 struct bch_inode_info *inode = to_bch_ei(child->d_inode); 1225 1231 struct bch_inode_info *dir = to_bch_ei(parent->d_inode); 1226 1232 struct bch_fs *c = inode->v.i_sb->s_fs_info; 1227 - struct btree_trans trans; 1233 + struct btree_trans *trans; 1228 1234 struct btree_iter iter1; 1229 1235 struct btree_iter iter2; 1230 1236 struct bkey_s_c k; ··· 1239 1245 if (!S_ISDIR(dir->v.i_mode)) 1240 1246 return -EINVAL; 1241 1247 1242 - bch2_trans_init(&trans, c, 0, 0); 1248 + trans = bch2_trans_get(c); 1243 1249 1244 - bch2_trans_iter_init(&trans, &iter1, BTREE_ID_dirents, 1250 + bch2_trans_iter_init(trans, &iter1, BTREE_ID_dirents, 1245 1251 POS(dir->ei_inode.bi_inum, 0), 0); 1246 - bch2_trans_iter_init(&trans, &iter2, BTREE_ID_dirents, 1252 + bch2_trans_iter_init(trans, &iter2, BTREE_ID_dirents, 1247 1253 POS(dir->ei_inode.bi_inum, 0), 0); 1248 1254 retry: 1249 - bch2_trans_begin(&trans); 1255 + bch2_trans_begin(trans); 1250 1256 1251 - ret = bch2_subvolume_get_snapshot(&trans, dir->ei_subvol, &snapshot); 1257 + ret = bch2_subvolume_get_snapshot(trans, dir->ei_subvol, &snapshot); 1252 1258 if (ret) 1253 1259 goto err; 1254 1260 1255 1261 bch2_btree_iter_set_snapshot(&iter1, snapshot); 1256 1262 bch2_btree_iter_set_snapshot(&iter2, snapshot); 1257 1263 1258 - ret = bch2_inode_find_by_inum_trans(&trans, inode_inum(inode), &inode_u); 1264 + ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u); 1259 1265 if (ret) 1260 1266 goto err; 1261 1267 ··· 1273 1279 } 1274 1280 1275 1281 d = bkey_s_c_to_dirent(k); 1276 - ret = bch2_dirent_read_target(&trans, inode_inum(dir), d, &target); 1282 + ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target); 1277 1283 if (ret > 0) 1278 1284 ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode; 1279 1285 if (ret) ··· 1295 1301 continue; 1296 1302 1297 1303 d = bkey_s_c_to_dirent(k); 1298 - ret = bch2_dirent_read_target(&trans, inode_inum(dir), d, &target); 1304 + ret = 
bch2_dirent_read_target(trans, inode_inum(dir), d, &target); 1299 1305 if (ret < 0) 1300 1306 break; 1301 1307 if (ret) ··· 1319 1325 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 1320 1326 goto retry; 1321 1327 1322 - bch2_trans_iter_exit(&trans, &iter1); 1323 - bch2_trans_iter_exit(&trans, &iter2); 1324 - bch2_trans_exit(&trans); 1328 + bch2_trans_iter_exit(trans, &iter1); 1329 + bch2_trans_iter_exit(trans, &iter2); 1330 + bch2_trans_put(trans); 1325 1331 1326 1332 return ret; 1327 1333 }
+38 -58
fs/bcachefs/fsck.c
··· 987 987 int bch2_check_inodes(struct bch_fs *c) 988 988 { 989 989 bool full = c->opts.fsck; 990 - struct btree_trans trans; 990 + struct btree_trans *trans = bch2_trans_get(c); 991 991 struct btree_iter iter; 992 992 struct bch_inode_unpacked prev = { 0 }; 993 993 struct snapshots_seen s; ··· 995 995 int ret; 996 996 997 997 snapshots_seen_init(&s); 998 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 999 998 1000 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes, 999 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, 1001 1000 POS_MIN, 1002 1001 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 1003 1002 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 1004 - check_inode(&trans, &iter, k, &prev, &s, full)); 1003 + check_inode(trans, &iter, k, &prev, &s, full)); 1005 1004 1006 - bch2_trans_exit(&trans); 1007 1005 snapshots_seen_exit(&s); 1006 + bch2_trans_put(trans); 1008 1007 if (ret) 1009 1008 bch_err_fn(c, ret); 1010 1009 return ret; ··· 1436 1437 { 1437 1438 struct inode_walker w = inode_walker_init(); 1438 1439 struct snapshots_seen s; 1439 - struct btree_trans trans; 1440 + struct btree_trans *trans = bch2_trans_get(c); 1440 1441 struct btree_iter iter; 1441 1442 struct bkey_s_c k; 1442 1443 struct extent_ends extent_ends; ··· 1445 1446 1446 1447 snapshots_seen_init(&s); 1447 1448 extent_ends_init(&extent_ends); 1448 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096); 1449 1449 1450 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents, 1450 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_extents, 1451 1451 POS(BCACHEFS_ROOT_INO, 0), 1452 1452 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 1453 1453 &res, NULL, 1454 1454 BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({ 1455 1455 bch2_disk_reservation_put(c, &res); 1456 - check_extent(&trans, &iter, k, &w, &s, &extent_ends); 1456 + check_extent(trans, &iter, k, &w, &s, &extent_ends); 1457 1457 })) ?: 1458 - check_i_sectors(&trans, &w); 1458 + 
check_i_sectors(trans, &w); 1459 1459 1460 1460 bch2_disk_reservation_put(c, &res); 1461 1461 extent_ends_exit(&extent_ends); 1462 1462 inode_walker_exit(&w); 1463 - bch2_trans_exit(&trans); 1464 1463 snapshots_seen_exit(&s); 1464 + bch2_trans_put(trans); 1465 1465 1466 1466 if (ret) 1467 1467 bch_err_fn(c, ret); ··· 1801 1803 struct inode_walker target = inode_walker_init(); 1802 1804 struct snapshots_seen s; 1803 1805 struct bch_hash_info hash_info; 1804 - struct btree_trans trans; 1806 + struct btree_trans *trans = bch2_trans_get(c); 1805 1807 struct btree_iter iter; 1806 1808 struct bkey_s_c k; 1807 1809 int ret = 0; 1808 1810 1809 1811 snapshots_seen_init(&s); 1810 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 1811 1812 1812 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_dirents, 1813 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents, 1813 1814 POS(BCACHEFS_ROOT_INO, 0), 1814 1815 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, 1815 1816 k, 1816 1817 NULL, NULL, 1817 1818 BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 1818 - check_dirent(&trans, &iter, k, &hash_info, &dir, &target, &s)); 1819 + check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)); 1819 1820 1820 - bch2_trans_exit(&trans); 1821 + bch2_trans_put(trans); 1821 1822 snapshots_seen_exit(&s); 1822 1823 inode_walker_exit(&dir); 1823 1824 inode_walker_exit(&target); ··· 1870 1873 { 1871 1874 struct inode_walker inode = inode_walker_init(); 1872 1875 struct bch_hash_info hash_info; 1873 - struct btree_trans trans; 1874 1876 struct btree_iter iter; 1875 1877 struct bkey_s_c k; 1876 1878 int ret = 0; 1877 1879 1878 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 1879 - 1880 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs, 1880 + ret = bch2_trans_run(c, 1881 + for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, 1881 1882 POS(BCACHEFS_ROOT_INO, 0), 1882 1883 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, 1883 1884 k, 1884 1885 NULL, NULL, 1885 1886 
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 1886 - check_xattr(&trans, &iter, k, &hash_info, &inode)); 1887 - 1888 - bch2_trans_exit(&trans); 1889 - 1887 + check_xattr(trans, &iter, k, &hash_info, &inode))); 1890 1888 if (ret) 1891 1889 bch_err_fn(c, ret); 1892 1890 return ret; ··· 1950 1958 ret = bch2_trans_do(c, NULL, NULL, 1951 1959 BTREE_INSERT_NOFAIL| 1952 1960 BTREE_INSERT_LAZY_RW, 1953 - check_root_trans(&trans)); 1961 + check_root_trans(trans)); 1954 1962 1955 1963 if (ret) 1956 1964 bch_err_fn(c, ret); ··· 2102 2110 */ 2103 2111 int bch2_check_directory_structure(struct bch_fs *c) 2104 2112 { 2105 - struct btree_trans trans; 2113 + struct btree_trans *trans = bch2_trans_get(c); 2106 2114 struct btree_iter iter; 2107 2115 struct bkey_s_c k; 2108 2116 struct bch_inode_unpacked u; 2109 2117 pathbuf path = { 0, }; 2110 2118 int ret; 2111 2119 2112 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 2113 - 2114 - for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN, 2120 + for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN, 2115 2121 BTREE_ITER_INTENT| 2116 2122 BTREE_ITER_PREFETCH| 2117 2123 BTREE_ITER_ALL_SNAPSHOTS, k, ret) { ··· 2126 2136 if (u.bi_flags & BCH_INODE_UNLINKED) 2127 2137 continue; 2128 2138 2129 - ret = check_path(&trans, &path, &u, iter.pos.snapshot); 2139 + ret = check_path(trans, &path, &u, iter.pos.snapshot); 2130 2140 if (ret) 2131 2141 break; 2132 2142 } 2133 - bch2_trans_iter_exit(&trans, &iter); 2134 - bch2_trans_exit(&trans); 2143 + bch2_trans_iter_exit(trans, &iter); 2144 + bch2_trans_put(trans); 2135 2145 darray_exit(&path); 2136 2146 2137 2147 if (ret) ··· 2220 2230 struct nlink_table *t, 2221 2231 u64 start, u64 *end) 2222 2232 { 2223 - struct btree_trans trans; 2233 + struct btree_trans *trans = bch2_trans_get(c); 2224 2234 struct btree_iter iter; 2225 2235 struct bkey_s_c k; 2226 2236 struct bch_inode_unpacked u; 2227 2237 int ret = 0; 2228 2238 2229 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 2230 - 2231 - 
for_each_btree_key(&trans, iter, BTREE_ID_inodes, 2239 + for_each_btree_key(trans, iter, BTREE_ID_inodes, 2232 2240 POS(0, start), 2233 2241 BTREE_ITER_INTENT| 2234 2242 BTREE_ITER_PREFETCH| ··· 2255 2267 } 2256 2268 2257 2269 } 2258 - bch2_trans_iter_exit(&trans, &iter); 2259 - bch2_trans_exit(&trans); 2270 + bch2_trans_iter_exit(trans, &iter); 2271 + bch2_trans_put(trans); 2260 2272 2261 2273 if (ret) 2262 2274 bch_err(c, "error in fsck: btree error %i while walking inodes", ret); ··· 2268 2280 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links, 2269 2281 u64 range_start, u64 range_end) 2270 2282 { 2271 - struct btree_trans trans; 2283 + struct btree_trans *trans = bch2_trans_get(c); 2272 2284 struct snapshots_seen s; 2273 2285 struct btree_iter iter; 2274 2286 struct bkey_s_c k; ··· 2277 2289 2278 2290 snapshots_seen_init(&s); 2279 2291 2280 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 2281 - 2282 - for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN, 2292 + for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN, 2283 2293 BTREE_ITER_INTENT| 2284 2294 BTREE_ITER_PREFETCH| 2285 2295 BTREE_ITER_ALL_SNAPSHOTS, k, ret) { ··· 2297 2311 break; 2298 2312 } 2299 2313 } 2300 - bch2_trans_iter_exit(&trans, &iter); 2314 + bch2_trans_iter_exit(trans, &iter); 2301 2315 2302 2316 if (ret) 2303 2317 bch_err(c, "error in fsck: btree error %i while walking dirents", ret); 2304 2318 2305 - bch2_trans_exit(&trans); 2319 + bch2_trans_put(trans); 2306 2320 snapshots_seen_exit(&s); 2307 2321 return ret; 2308 2322 } ··· 2353 2367 struct nlink_table *links, 2354 2368 u64 range_start, u64 range_end) 2355 2369 { 2356 - struct btree_trans trans; 2357 2370 struct btree_iter iter; 2358 2371 struct bkey_s_c k; 2359 2372 size_t idx = 0; 2360 2373 int ret = 0; 2361 2374 2362 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 2363 - 2364 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes, 2365 - POS(0, range_start), 2366 - 
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 2367 - NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 2368 - check_nlinks_update_inode(&trans, &iter, k, links, &idx, range_end)); 2369 - 2370 - bch2_trans_exit(&trans); 2371 - 2375 + ret = bch2_trans_run(c, 2376 + for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, 2377 + POS(0, range_start), 2378 + BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 2379 + NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 2380 + check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end))); 2372 2381 if (ret < 0) { 2373 2382 bch_err(c, "error in fsck: btree error %i while walking inodes", ret); 2374 2383 return ret; ··· 2445 2464 return 0; 2446 2465 2447 2466 ret = bch2_trans_run(c, 2448 - for_each_btree_key_commit(&trans, iter, 2467 + for_each_btree_key_commit(trans, iter, 2449 2468 BTREE_ID_extents, POS_MIN, 2450 2469 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH| 2451 2470 BTREE_ITER_ALL_SNAPSHOTS, k, 2452 2471 NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, 2453 - fix_reflink_p_key(&trans, &iter, k))); 2454 - 2472 + fix_reflink_p_key(trans, &iter, k))); 2455 2473 if (ret) 2456 2474 bch_err_fn(c, ret); 2457 2475 return ret;
+21 -25
fs/bcachefs/inode.c
··· 826 826 827 827 int bch2_inode_rm(struct bch_fs *c, subvol_inum inum) 828 828 { 829 - struct btree_trans trans; 829 + struct btree_trans *trans = bch2_trans_get(c); 830 830 struct btree_iter iter = { NULL }; 831 831 struct bkey_i_inode_generation delete; 832 832 struct bch_inode_unpacked inode_u; 833 833 struct bkey_s_c k; 834 834 u32 snapshot; 835 835 int ret; 836 - 837 - bch2_trans_init(&trans, c, 0, 1024); 838 836 839 837 /* 840 838 * If this was a directory, there shouldn't be any real dirents left - ··· 842 844 * XXX: the dirent could ideally would delete whiteouts when they're no 843 845 * longer needed 844 846 */ 845 - ret = bch2_inode_delete_keys(&trans, inum, BTREE_ID_extents) ?: 846 - bch2_inode_delete_keys(&trans, inum, BTREE_ID_xattrs) ?: 847 - bch2_inode_delete_keys(&trans, inum, BTREE_ID_dirents); 847 + ret = bch2_inode_delete_keys(trans, inum, BTREE_ID_extents) ?: 848 + bch2_inode_delete_keys(trans, inum, BTREE_ID_xattrs) ?: 849 + bch2_inode_delete_keys(trans, inum, BTREE_ID_dirents); 848 850 if (ret) 849 851 goto err; 850 852 retry: 851 - bch2_trans_begin(&trans); 853 + bch2_trans_begin(trans); 852 854 853 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 855 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 854 856 if (ret) 855 857 goto err; 856 858 857 - k = bch2_bkey_get_iter(&trans, &iter, BTREE_ID_inodes, 859 + k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, 858 860 SPOS(0, inum.inum, snapshot), 859 861 BTREE_ITER_INTENT|BTREE_ITER_CACHED); 860 862 ret = bkey_err(k); ··· 862 864 goto err; 863 865 864 866 if (!bkey_is_inode(k.k)) { 865 - bch2_fs_inconsistent(trans.c, 867 + bch2_fs_inconsistent(c, 866 868 "inode %llu:%u not found when deleting", 867 869 inum.inum, snapshot); 868 870 ret = -EIO; ··· 875 877 delete.k.p = iter.pos; 876 878 delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1); 877 879 878 - ret = bch2_trans_update(&trans, &iter, &delete.k_i, 0) ?: 879 - 
bch2_trans_commit(&trans, NULL, NULL, 880 + ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?: 881 + bch2_trans_commit(trans, NULL, NULL, 880 882 BTREE_INSERT_NOFAIL); 881 883 err: 882 - bch2_trans_iter_exit(&trans, &iter); 884 + bch2_trans_iter_exit(trans, &iter); 883 885 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 884 886 goto retry; 885 887 886 - bch2_trans_exit(&trans); 888 + bch2_trans_put(trans); 887 889 return ret; 888 890 } 889 891 ··· 917 919 struct bch_inode_unpacked *inode) 918 920 { 919 921 return bch2_trans_do(c, NULL, NULL, 0, 920 - bch2_inode_find_by_inum_trans(&trans, inum, inode)); 922 + bch2_inode_find_by_inum_trans(trans, inum, inode)); 921 923 } 922 924 923 925 int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi) ··· 1089 1091 1090 1092 int bch2_delete_dead_inodes(struct bch_fs *c) 1091 1093 { 1092 - struct btree_trans trans; 1094 + struct btree_trans *trans = bch2_trans_get(c); 1093 1095 struct btree_iter iter; 1094 1096 struct bkey_s_c k; 1095 1097 int ret; 1096 1098 1097 - bch2_trans_init(&trans, c, 0, 0); 1098 - 1099 - ret = bch2_btree_write_buffer_flush_sync(&trans); 1099 + ret = bch2_btree_write_buffer_flush_sync(trans); 1100 1100 if (ret) 1101 1101 goto err; 1102 1102 ··· 1104 1108 * but we can't retry because the btree write buffer won't have been 1105 1109 * flushed and we'd spin: 1106 1110 */ 1107 - for_each_btree_key(&trans, iter, BTREE_ID_deleted_inodes, POS_MIN, 1111 + for_each_btree_key(trans, iter, BTREE_ID_deleted_inodes, POS_MIN, 1108 1112 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, ret) { 1109 - ret = lockrestart_do(&trans, may_delete_deleted_inode(&trans, k.k->p)); 1113 + ret = lockrestart_do(trans, may_delete_deleted_inode(trans, k.k->p)); 1110 1114 if (ret < 0) 1111 1115 break; 1112 1116 1113 1117 if (ret) { 1114 1118 if (!test_bit(BCH_FS_RW, &c->flags)) { 1115 - bch2_trans_unlock(&trans); 1119 + bch2_trans_unlock(trans); 1116 1120 bch2_fs_lazy_rw(c); 1117 1121 } 1118 1122 1119 - ret = 
bch2_inode_rm_snapshot(&trans, k.k->p.offset, k.k->p.snapshot); 1123 + ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot); 1120 1124 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart)) 1121 1125 break; 1122 1126 } 1123 1127 } 1124 - bch2_trans_iter_exit(&trans, &iter); 1128 + bch2_trans_iter_exit(trans, &iter); 1125 1129 err: 1126 - bch2_trans_exit(&trans); 1130 + bch2_trans_put(trans); 1127 1131 1128 1132 return ret; 1129 1133 }
+9 -10
fs/bcachefs/io_misc.c
··· 198 198 int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end, 199 199 s64 *i_sectors_delta) 200 200 { 201 - struct btree_trans trans; 201 + struct btree_trans *trans = bch2_trans_get(c); 202 202 struct btree_iter iter; 203 203 int ret; 204 204 205 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024); 206 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 205 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 207 206 POS(inum.inum, start), 208 207 BTREE_ITER_INTENT); 209 208 210 - ret = bch2_fpunch_at(&trans, &iter, inum, end, i_sectors_delta); 209 + ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta); 211 210 212 - bch2_trans_iter_exit(&trans, &iter); 213 - bch2_trans_exit(&trans); 211 + bch2_trans_iter_exit(trans, &iter); 212 + bch2_trans_put(trans); 214 213 215 214 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 216 215 ret = 0; ··· 288 289 op.v.new_i_size = cpu_to_le64(new_i_size); 289 290 290 291 return bch2_trans_run(c, 291 - bch2_logged_op_start(&trans, &op.k_i) ?: 292 - __bch2_resume_logged_op_truncate(&trans, &op.k_i, i_sectors_delta)); 292 + bch2_logged_op_start(trans, &op.k_i) ?: 293 + __bch2_resume_logged_op_truncate(trans, &op.k_i, i_sectors_delta)); 293 294 } 294 295 295 296 /* finsert/fcollapse: */ ··· 492 493 op.v.pos = cpu_to_le64(insert ? U64_MAX : offset); 493 494 494 495 return bch2_trans_run(c, 495 - bch2_logged_op_start(&trans, &op.k_i) ?: 496 - __bch2_resume_logged_op_finsert(&trans, &op.k_i, i_sectors_delta)); 496 + bch2_logged_op_start(trans, &op.k_i) ?: 497 + __bch2_resume_logged_op_finsert(trans, &op.k_i, i_sectors_delta)); 497 498 }
+17 -19
fs/bcachefs/io_read.c
··· 359 359 struct bch_io_failures *failed, 360 360 unsigned flags) 361 361 { 362 - struct btree_trans trans; 362 + struct btree_trans *trans = bch2_trans_get(c); 363 363 struct btree_iter iter; 364 364 struct bkey_buf sk; 365 365 struct bkey_s_c k; ··· 369 369 flags |= BCH_READ_MUST_CLONE; 370 370 371 371 bch2_bkey_buf_init(&sk); 372 - bch2_trans_init(&trans, c, 0, 0); 373 372 374 - bch2_trans_iter_init(&trans, &iter, rbio->data_btree, 373 + bch2_trans_iter_init(trans, &iter, rbio->data_btree, 375 374 rbio->read_pos, BTREE_ITER_SLOTS); 376 375 retry: 377 376 rbio->bio.bi_status = 0; ··· 381 382 382 383 bch2_bkey_buf_reassemble(&sk, c, k); 383 384 k = bkey_i_to_s_c(sk.k); 384 - bch2_trans_unlock(&trans); 385 + bch2_trans_unlock(trans); 385 386 386 387 if (!bch2_bkey_matches_ptr(c, k, 387 388 rbio->pick.ptr, ··· 392 393 goto out; 393 394 } 394 395 395 - ret = __bch2_read_extent(&trans, rbio, bvec_iter, 396 + ret = __bch2_read_extent(trans, rbio, bvec_iter, 396 397 rbio->read_pos, 397 398 rbio->data_btree, 398 399 k, 0, failed, flags); ··· 402 403 goto err; 403 404 out: 404 405 bch2_rbio_done(rbio); 405 - bch2_trans_iter_exit(&trans, &iter); 406 - bch2_trans_exit(&trans); 406 + bch2_trans_iter_exit(trans, &iter); 407 + bch2_trans_put(trans); 407 408 bch2_bkey_buf_exit(&sk, c); 408 409 return; 409 410 err: ··· 525 526 static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio) 526 527 { 527 528 bch2_trans_do(rbio->c, NULL, NULL, BTREE_INSERT_NOFAIL, 528 - __bch2_rbio_narrow_crcs(&trans, rbio)); 529 + __bch2_rbio_narrow_crcs(trans, rbio)); 529 530 } 530 531 531 532 /* Inner part that may run in process context */ ··· 1081 1082 struct bvec_iter bvec_iter, subvol_inum inum, 1082 1083 struct bch_io_failures *failed, unsigned flags) 1083 1084 { 1084 - struct btree_trans trans; 1085 + struct btree_trans *trans = bch2_trans_get(c); 1085 1086 struct btree_iter iter; 1086 1087 struct bkey_buf sk; 1087 1088 struct bkey_s_c k; ··· 1091 1092 BUG_ON(flags & 
BCH_READ_NODECODE); 1092 1093 1093 1094 bch2_bkey_buf_init(&sk); 1094 - bch2_trans_init(&trans, c, 0, 0); 1095 1095 retry: 1096 - bch2_trans_begin(&trans); 1096 + bch2_trans_begin(trans); 1097 1097 iter = (struct btree_iter) { NULL }; 1098 1098 1099 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot); 1099 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot); 1100 1100 if (ret) 1101 1101 goto err; 1102 1102 1103 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 1103 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 1104 1104 SPOS(inum.inum, bvec_iter.bi_sector, snapshot), 1105 1105 BTREE_ITER_SLOTS); 1106 1106 while (1) { ··· 1110 1112 * read_extent -> io_time_reset may cause a transaction restart 1111 1113 * without returning an error, we need to check for that here: 1112 1114 */ 1113 - ret = bch2_trans_relock(&trans); 1115 + ret = bch2_trans_relock(trans); 1114 1116 if (ret) 1115 1117 break; 1116 1118 ··· 1128 1130 1129 1131 bch2_bkey_buf_reassemble(&sk, c, k); 1130 1132 1131 - ret = bch2_read_indirect_extent(&trans, &data_btree, 1133 + ret = bch2_read_indirect_extent(trans, &data_btree, 1132 1134 &offset_into_extent, &sk); 1133 1135 if (ret) 1134 1136 break; ··· 1147 1149 if (bvec_iter.bi_size == bytes) 1148 1150 flags |= BCH_READ_LAST_FRAGMENT; 1149 1151 1150 - ret = __bch2_read_extent(&trans, rbio, bvec_iter, iter.pos, 1152 + ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos, 1151 1153 data_btree, k, 1152 1154 offset_into_extent, failed, flags); 1153 1155 if (ret) ··· 1159 1161 swap(bvec_iter.bi_size, bytes); 1160 1162 bio_advance_iter(&rbio->bio, &bvec_iter, bytes); 1161 1163 1162 - ret = btree_trans_too_many_iters(&trans); 1164 + ret = btree_trans_too_many_iters(trans); 1163 1165 if (ret) 1164 1166 break; 1165 1167 } 1166 1168 err: 1167 - bch2_trans_iter_exit(&trans, &iter); 1169 + bch2_trans_iter_exit(trans, &iter); 1168 1170 1169 1171 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || 1170 1172 ret == 
READ_RETRY || 1171 1173 ret == READ_RETRY_AVOID) 1172 1174 goto retry; 1173 1175 1174 - bch2_trans_exit(&trans); 1176 + bch2_trans_put(trans); 1175 1177 bch2_bkey_buf_exit(&sk, c); 1176 1178 1177 1179 if (ret) {
+20 -23
fs/bcachefs/io_write.c
··· 322 322 struct bkey_buf sk; 323 323 struct keylist *keys = &op->insert_keys; 324 324 struct bkey_i *k = bch2_keylist_front(keys); 325 - struct btree_trans trans; 325 + struct btree_trans *trans = bch2_trans_get(c); 326 326 struct btree_iter iter; 327 327 subvol_inum inum = { 328 328 .subvol = op->subvol, ··· 333 333 BUG_ON(!inum.subvol); 334 334 335 335 bch2_bkey_buf_init(&sk); 336 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024); 337 336 338 337 do { 339 - bch2_trans_begin(&trans); 338 + bch2_trans_begin(trans); 340 339 341 340 k = bch2_keylist_front(keys); 342 341 bch2_bkey_buf_copy(&sk, c, k); 343 342 344 - ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, 343 + ret = bch2_subvolume_get_snapshot(trans, inum.subvol, 345 344 &sk.k->k.p.snapshot); 346 345 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 347 346 continue; 348 347 if (ret) 349 348 break; 350 349 351 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 350 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 352 351 bkey_start_pos(&sk.k->k), 353 352 BTREE_ITER_SLOTS|BTREE_ITER_INTENT); 354 353 355 - ret = bch2_extent_update(&trans, inum, &iter, sk.k, 354 + ret = bch2_extent_update(trans, inum, &iter, sk.k, 356 355 &op->res, 357 356 op->new_i_size, &op->i_sectors_delta, 358 357 op->flags & BCH_WRITE_CHECK_ENOSPC); 359 - bch2_trans_iter_exit(&trans, &iter); 358 + bch2_trans_iter_exit(trans, &iter); 360 359 361 360 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 362 361 continue; ··· 368 369 bch2_cut_front(iter.pos, k); 369 370 } while (!bch2_keylist_empty(keys)); 370 371 371 - bch2_trans_exit(&trans); 372 + bch2_trans_put(trans); 372 373 bch2_bkey_buf_exit(&sk, c); 373 374 374 375 return ret; ··· 1162 1163 static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) 1163 1164 { 1164 1165 struct bch_fs *c = op->c; 1165 - struct btree_trans trans; 1166 + struct btree_trans *trans = bch2_trans_get(c); 1166 1167 struct btree_iter iter; 1167 1168 struct bkey_i *orig; 
1168 1169 struct bkey_s_c k; 1169 1170 int ret; 1170 1171 1171 - bch2_trans_init(&trans, c, 0, 0); 1172 - 1173 1172 for_each_keylist_key(&op->insert_keys, orig) { 1174 - ret = for_each_btree_key_upto_commit(&trans, iter, BTREE_ID_extents, 1173 + ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents, 1175 1174 bkey_start_pos(&orig->k), orig->k.p, 1176 1175 BTREE_ITER_INTENT, k, 1177 1176 NULL, NULL, BTREE_INSERT_NOFAIL, ({ 1178 - bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size); 1177 + bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size); 1179 1178 })); 1180 1179 1181 1180 if (ret && !bch2_err_matches(ret, EROFS)) { ··· 1191 1194 } 1192 1195 } 1193 1196 1194 - bch2_trans_exit(&trans); 1197 + bch2_trans_put(trans); 1195 1198 } 1196 1199 1197 1200 static void __bch2_nocow_write_done(struct bch_write_op *op) ··· 1215 1218 static void bch2_nocow_write(struct bch_write_op *op) 1216 1219 { 1217 1220 struct bch_fs *c = op->c; 1218 - struct btree_trans trans; 1221 + struct btree_trans *trans; 1219 1222 struct btree_iter iter; 1220 1223 struct bkey_s_c k; 1221 1224 struct bkey_ptrs_c ptrs; ··· 1232 1235 if (op->flags & BCH_WRITE_MOVE) 1233 1236 return; 1234 1237 1235 - bch2_trans_init(&trans, c, 0, 0); 1238 + trans = bch2_trans_get(c); 1236 1239 retry: 1237 - bch2_trans_begin(&trans); 1240 + bch2_trans_begin(trans); 1238 1241 1239 - ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot); 1242 + ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot); 1240 1243 if (unlikely(ret)) 1241 1244 goto err; 1242 1245 1243 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 1246 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 1244 1247 SPOS(op->pos.inode, op->pos.offset, snapshot), 1245 1248 BTREE_ITER_SLOTS); 1246 1249 while (1) { ··· 1286 1289 1287 1290 /* Unlock before taking nocow locks, doing IO: */ 1288 1291 bkey_reassemble(op->insert_keys.top, k); 1289 - bch2_trans_unlock(&trans); 
1292 + bch2_trans_unlock(trans); 1290 1293 1291 1294 bch2_cut_front(op->pos, op->insert_keys.top); 1292 1295 if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN) ··· 1335 1338 bch2_btree_iter_advance(&iter); 1336 1339 } 1337 1340 out: 1338 - bch2_trans_iter_exit(&trans, &iter); 1341 + bch2_trans_iter_exit(trans, &iter); 1339 1342 err: 1340 1343 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 1341 1344 goto retry; ··· 1350 1353 op->flags |= BCH_WRITE_DONE; 1351 1354 } 1352 1355 1353 - bch2_trans_exit(&trans); 1356 + bch2_trans_put(trans); 1354 1357 1355 1358 /* fallback to cow write path? */ 1356 1359 if (!(op->flags & BCH_WRITE_DONE)) { ··· 1428 1431 * allocations for specific disks may hang arbitrarily long: 1429 1432 */ 1430 1433 ret = bch2_trans_do(c, NULL, NULL, 0, 1431 - bch2_alloc_sectors_start_trans(&trans, 1434 + bch2_alloc_sectors_start_trans(trans, 1432 1435 op->target, 1433 1436 op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED), 1434 1437 op->write_point,
+2 -2
fs/bcachefs/journal.c
··· 834 834 break; 835 835 836 836 ret = bch2_trans_run(c, 837 - bch2_trans_mark_metadata_bucket(&trans, ca, 837 + bch2_trans_mark_metadata_bucket(trans, ca, 838 838 ob[nr_got]->bucket, BCH_DATA_journal, 839 839 ca->mi.bucket_size)); 840 840 if (ret) { ··· 915 915 if (ret && !new_fs) 916 916 for (i = 0; i < nr_got; i++) 917 917 bch2_trans_run(c, 918 - bch2_trans_mark_metadata_bucket(&trans, ca, 918 + bch2_trans_mark_metadata_bucket(trans, ca, 919 919 bu[i], BCH_DATA_free, 0)); 920 920 err_free: 921 921 if (!new_fs)
+5 -7
fs/bcachefs/journal_seq_blacklist.c
··· 250 250 struct journal_seq_blacklist_table *t; 251 251 struct bch_sb_field_journal_seq_blacklist *bl; 252 252 struct journal_seq_blacklist_entry *src, *dst; 253 - struct btree_trans trans; 253 + struct btree_trans *trans = bch2_trans_get(c); 254 254 unsigned i, nr, new_nr; 255 255 int ret; 256 - 257 - bch2_trans_init(&trans, c, 0, 0); 258 256 259 257 for (i = 0; i < BTREE_ID_NR; i++) { 260 258 struct btree_iter iter; 261 259 struct btree *b; 262 260 263 - bch2_trans_node_iter_init(&trans, &iter, i, POS_MIN, 261 + bch2_trans_node_iter_init(trans, &iter, i, POS_MIN, 264 262 0, 0, BTREE_ITER_PREFETCH); 265 263 retry: 266 - bch2_trans_begin(&trans); 264 + bch2_trans_begin(trans); 267 265 268 266 b = bch2_btree_iter_peek_node(&iter); 269 267 ··· 273 275 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 274 276 goto retry; 275 277 276 - bch2_trans_iter_exit(&trans, &iter); 278 + bch2_trans_iter_exit(trans, &iter); 277 279 } 278 280 279 - bch2_trans_exit(&trans); 281 + bch2_trans_put(trans); 280 282 if (ret) 281 283 return; 282 284
+2 -2
fs/bcachefs/logged_ops.c
··· 59 59 int ret; 60 60 61 61 ret = bch2_trans_run(c, 62 - for_each_btree_key2(&trans, iter, 62 + for_each_btree_key2(trans, iter, 63 63 BTREE_ID_logged_ops, POS_MIN, BTREE_ITER_PREFETCH, k, 64 - resume_logged_op(&trans, &iter, k))); 64 + resume_logged_op(trans, &iter, k))); 65 65 if (ret) 66 66 bch_err_fn(c, ret); 67 67 return ret;
+2 -2
fs/bcachefs/lru.c
··· 151 151 int ret = 0; 152 152 153 153 ret = bch2_trans_run(c, 154 - for_each_btree_key_commit(&trans, iter, 154 + for_each_btree_key_commit(trans, iter, 155 155 BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k, 156 156 NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW, 157 - bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos))); 157 + bch2_check_lru_key(trans, &iter, k, &last_flushed_pos))); 158 158 if (ret) 159 159 bch_err_fn(c, ret); 160 160 return ret;
+11 -13
fs/bcachefs/migrate.c
··· 78 78 79 79 static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags) 80 80 { 81 - struct btree_trans trans; 81 + struct btree_trans *trans = bch2_trans_get(c); 82 82 struct btree_iter iter; 83 83 struct bkey_s_c k; 84 84 enum btree_id id; 85 85 int ret = 0; 86 86 87 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); 88 - 89 87 for (id = 0; id < BTREE_ID_NR; id++) { 90 88 if (!btree_type_has_ptrs(id)) 91 89 continue; 92 90 93 - ret = for_each_btree_key_commit(&trans, iter, id, POS_MIN, 91 + ret = for_each_btree_key_commit(trans, iter, id, POS_MIN, 94 92 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 95 93 NULL, NULL, BTREE_INSERT_NOFAIL, 96 - bch2_dev_usrdata_drop_key(&trans, &iter, k, dev_idx, flags)); 94 + bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags)); 97 95 if (ret) 98 96 break; 99 97 } 100 98 101 - bch2_trans_exit(&trans); 99 + bch2_trans_put(trans); 102 100 103 101 return ret; 104 102 } 105 103 106 104 static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags) 107 105 { 108 - struct btree_trans trans; 106 + struct btree_trans *trans; 109 107 struct btree_iter iter; 110 108 struct closure cl; 111 109 struct btree *b; ··· 115 117 if (flags & BCH_FORCE_IF_METADATA_LOST) 116 118 return -EINVAL; 117 119 120 + trans = bch2_trans_get(c); 118 121 bch2_bkey_buf_init(&k); 119 - bch2_trans_init(&trans, c, 0, 0); 120 122 closure_init_stack(&cl); 121 123 122 124 for (id = 0; id < BTREE_ID_NR; id++) { 123 - bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0, 125 + bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0, 124 126 BTREE_ITER_PREFETCH); 125 127 retry: 126 128 ret = 0; 127 - while (bch2_trans_begin(&trans), 129 + while (bch2_trans_begin(trans), 128 130 (b = bch2_btree_iter_peek_node(&iter)) && 129 131 !(ret = PTR_ERR_OR_ZERO(b))) { 130 132 if (!bch2_bkey_has_device_c(bkey_i_to_s_c(&b->key), dev_idx)) ··· 139 141 break; 140 142 } 141 143 142 - ret = bch2_btree_node_update_key(&trans, &iter, 
b, k.k, 0, false); 144 + ret = bch2_btree_node_update_key(trans, &iter, b, k.k, 0, false); 143 145 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { 144 146 ret = 0; 145 147 continue; ··· 155 157 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 156 158 goto retry; 157 159 158 - bch2_trans_iter_exit(&trans, &iter); 160 + bch2_trans_iter_exit(trans, &iter); 159 161 160 162 if (ret) 161 163 goto err; ··· 164 166 bch2_btree_interior_updates_flush(c); 165 167 ret = 0; 166 168 err: 167 - bch2_trans_exit(&trans); 168 169 bch2_bkey_buf_exit(&k, c); 170 + bch2_trans_put(trans); 169 171 170 172 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart)); 171 173
+18 -21
fs/bcachefs/move.c
··· 525 525 struct bch_fs *c = ctxt->c; 526 526 struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); 527 527 struct bkey_buf sk; 528 - struct btree_trans trans; 528 + struct btree_trans *trans = bch2_trans_get(c); 529 529 struct btree_iter iter; 530 530 struct bkey_s_c k; 531 531 struct data_update_opts data_opts; ··· 533 533 int ret = 0, ret2; 534 534 535 535 bch2_bkey_buf_init(&sk); 536 - bch2_trans_init(&trans, c, 0, 0); 537 536 538 537 if (ctxt->stats) { 539 538 ctxt->stats->data_type = BCH_DATA_user; ··· 540 541 ctxt->stats->pos = start; 541 542 } 542 543 543 - bch2_trans_iter_init(&trans, &iter, btree_id, start, 544 + bch2_trans_iter_init(trans, &iter, btree_id, start, 544 545 BTREE_ITER_PREFETCH| 545 546 BTREE_ITER_ALL_SNAPSHOTS); 546 547 547 548 if (ctxt->rate) 548 549 bch2_ratelimit_reset(ctxt->rate); 549 550 550 - while (!move_ratelimit(&trans, ctxt)) { 551 - bch2_trans_begin(&trans); 551 + while (!move_ratelimit(trans, ctxt)) { 552 + bch2_trans_begin(trans); 552 553 553 554 k = bch2_btree_iter_peek(&iter); 554 555 if (!k.k) ··· 569 570 if (!bkey_extent_is_direct_data(k.k)) 570 571 goto next_nondata; 571 572 572 - ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum); 573 + ret = move_get_io_opts(trans, &io_opts, k, &cur_inum); 573 574 if (ret) 574 575 continue; 575 576 ··· 584 585 bch2_bkey_buf_reassemble(&sk, c, k); 585 586 k = bkey_i_to_s_c(sk.k); 586 587 587 - ret2 = bch2_move_extent(&trans, &iter, ctxt, NULL, 588 + ret2 = bch2_move_extent(trans, &iter, ctxt, NULL, 588 589 io_opts, btree_id, k, data_opts); 589 590 if (ret2) { 590 591 if (bch2_err_matches(ret2, BCH_ERR_transaction_restart)) ··· 592 593 593 594 if (ret2 == -ENOMEM) { 594 595 /* memory allocation failure, wait for some IO to finish */ 595 - bch2_move_ctxt_wait_for_io(ctxt, &trans); 596 + bch2_move_ctxt_wait_for_io(ctxt, trans); 596 597 continue; 597 598 } 598 599 ··· 609 610 bch2_btree_iter_advance(&iter); 610 611 } 611 612 612 - bch2_trans_iter_exit(&trans, &iter); 613 - 
bch2_trans_exit(&trans); 613 + bch2_trans_iter_exit(trans, &iter); 614 + bch2_trans_put(trans); 614 615 bch2_bkey_buf_exit(&sk, c); 615 616 616 617 return ret; ··· 825 826 struct write_point_specifier wp, 826 827 bool wait_on_copygc) 827 828 { 828 - struct btree_trans trans; 829 + struct btree_trans *trans = bch2_trans_get(c); 829 830 struct moving_context ctxt; 830 831 int ret; 831 832 832 - bch2_trans_init(&trans, c, 0, 0); 833 833 bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc); 834 - ret = __bch2_evacuate_bucket(&trans, &ctxt, NULL, bucket, gen, data_opts); 834 + ret = __bch2_evacuate_bucket(trans, &ctxt, NULL, bucket, gen, data_opts); 835 835 bch2_moving_ctxt_exit(&ctxt); 836 - bch2_trans_exit(&trans); 836 + bch2_trans_put(trans); 837 837 838 838 return ret; 839 839 } ··· 849 851 { 850 852 bool kthread = (current->flags & PF_KTHREAD) != 0; 851 853 struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); 852 - struct btree_trans trans; 854 + struct btree_trans *trans = bch2_trans_get(c); 853 855 struct btree_iter iter; 854 856 struct btree *b; 855 857 enum btree_id id; 856 858 struct data_update_opts data_opts; 857 859 int ret = 0; 858 860 859 - bch2_trans_init(&trans, c, 0, 0); 860 861 progress_list_add(c, stats); 861 862 862 863 stats->data_type = BCH_DATA_btree; ··· 868 871 if (!bch2_btree_id_root(c, id)->b) 869 872 continue; 870 873 871 - bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0, 874 + bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0, 872 875 BTREE_ITER_PREFETCH); 873 876 retry: 874 877 ret = 0; 875 - while (bch2_trans_begin(&trans), 878 + while (bch2_trans_begin(trans), 876 879 (b = bch2_btree_iter_peek_node(&iter)) && 877 880 !(ret = PTR_ERR_OR_ZERO(b))) { 878 881 if (kthread && kthread_should_stop()) ··· 887 890 if (!pred(c, arg, b, &io_opts, &data_opts)) 888 891 goto next; 889 892 890 - ret = bch2_btree_node_rewrite(&trans, &iter, b, 0) ?: ret; 893 + ret = bch2_btree_node_rewrite(trans, &iter, b, 0) ?: 
ret; 891 894 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 892 895 continue; 893 896 if (ret) ··· 898 901 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 899 902 goto retry; 900 903 901 - bch2_trans_iter_exit(&trans, &iter); 904 + bch2_trans_iter_exit(trans, &iter); 902 905 903 906 if (kthread && kthread_should_stop()) 904 907 break; 905 908 } 906 909 907 - bch2_trans_exit(&trans); 910 + bch2_trans_put(trans); 908 911 909 912 if (ret) 910 913 bch_err_fn(c, ret);
+9 -9
fs/bcachefs/movinggc.c
··· 300 300 static int bch2_copygc_thread(void *arg) 301 301 { 302 302 struct bch_fs *c = arg; 303 - struct btree_trans trans; 303 + struct btree_trans *trans; 304 304 struct moving_context ctxt; 305 305 struct bch_move_stats move_stats; 306 306 struct io_clock *clock = &c->io_clock[WRITE]; ··· 317 317 } 318 318 319 319 set_freezable(); 320 - bch2_trans_init(&trans, c, 0, 0); 320 + trans = bch2_trans_get(c); 321 321 322 322 bch2_move_stats_init(&move_stats, "copygc"); 323 323 bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats, ··· 325 325 false); 326 326 327 327 while (!ret && !kthread_should_stop()) { 328 - bch2_trans_unlock(&trans); 328 + bch2_trans_unlock(trans); 329 329 cond_resched(); 330 330 331 331 if (!c->copy_gc_enabled) { 332 - move_buckets_wait(&trans, &ctxt, &buckets, true); 332 + move_buckets_wait(trans, &ctxt, &buckets, true); 333 333 kthread_wait_freezable(c->copy_gc_enabled); 334 334 } 335 335 336 336 if (unlikely(freezing(current))) { 337 - move_buckets_wait(&trans, &ctxt, &buckets, true); 337 + move_buckets_wait(trans, &ctxt, &buckets, true); 338 338 __refrigerator(false); 339 339 continue; 340 340 } ··· 345 345 if (wait > clock->max_slop) { 346 346 c->copygc_wait_at = last; 347 347 c->copygc_wait = last + wait; 348 - move_buckets_wait(&trans, &ctxt, &buckets, true); 348 + move_buckets_wait(trans, &ctxt, &buckets, true); 349 349 trace_and_count(c, copygc_wait, c, wait, last + wait); 350 350 bch2_kthread_io_clock_wait(clock, last + wait, 351 351 MAX_SCHEDULE_TIMEOUT); ··· 355 355 c->copygc_wait = 0; 356 356 357 357 c->copygc_running = true; 358 - ret = bch2_copygc(&trans, &ctxt, &buckets); 358 + ret = bch2_copygc(trans, &ctxt, &buckets); 359 359 c->copygc_running = false; 360 360 361 361 wake_up(&c->copygc_running_wq); 362 362 } 363 363 364 - move_buckets_wait(&trans, &ctxt, &buckets, true); 364 + move_buckets_wait(trans, &ctxt, &buckets, true); 365 365 rhashtable_destroy(&buckets.table); 366 - bch2_trans_exit(&trans); 366 + bch2_trans_put(trans); 
367 367 bch2_moving_ctxt_exit(&ctxt); 368 368 369 369 return 0;
+7 -7
fs/bcachefs/quota.c
··· 599 599 int bch2_fs_quota_read(struct bch_fs *c) 600 600 { 601 601 struct bch_sb_field_quota *sb_quota; 602 - struct btree_trans trans; 602 + struct btree_trans *trans; 603 603 struct btree_iter iter; 604 604 struct bkey_s_c k; 605 605 int ret; ··· 614 614 bch2_sb_quota_read(c); 615 615 mutex_unlock(&c->sb_lock); 616 616 617 - bch2_trans_init(&trans, c, 0, 0); 617 + trans = bch2_trans_get(c); 618 618 619 - ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas, 619 + ret = for_each_btree_key2(trans, iter, BTREE_ID_quotas, 620 620 POS_MIN, BTREE_ITER_PREFETCH, k, 621 621 __bch2_quota_set(c, k, NULL)) ?: 622 - for_each_btree_key2(&trans, iter, BTREE_ID_inodes, 622 + for_each_btree_key2(trans, iter, BTREE_ID_inodes, 623 623 POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 624 - bch2_fs_quota_read_inode(&trans, &iter, k)); 624 + bch2_fs_quota_read_inode(trans, &iter, k)); 625 625 626 - bch2_trans_exit(&trans); 626 + bch2_trans_put(trans); 627 627 628 628 if (ret) 629 629 bch_err_fn(c, ret); ··· 956 956 new_quota.k.p = POS(qid.type, from_kqid(&init_user_ns, qid)); 957 957 958 958 ret = bch2_trans_do(c, NULL, NULL, 0, 959 - bch2_set_quota_trans(&trans, &new_quota, qdq)) ?: 959 + bch2_set_quota_trans(trans, &new_quota, qdq)) ?: 960 960 __bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq); 961 961 962 962 return bch2_err_class(ret);
+3 -3
fs/bcachefs/recovery.c
··· 165 165 (!k->allocated 166 166 ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim 167 167 : 0), 168 - bch2_journal_replay_key(&trans, k)); 168 + bch2_journal_replay_key(trans, k)); 169 169 if (ret) { 170 170 bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s", 171 171 bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret)); ··· 466 466 static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c) 467 467 { 468 468 int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW, 469 - __bch2_fs_upgrade_for_subvolumes(&trans)); 469 + __bch2_fs_upgrade_for_subvolumes(trans)); 470 470 if (ret) 471 471 bch_err_fn(c, ret); 472 472 return ret; ··· 1013 1013 bch2_inode_init_early(c, &lostfound_inode); 1014 1014 1015 1015 ret = bch2_trans_do(c, NULL, NULL, 0, 1016 - bch2_create_trans(&trans, 1016 + bch2_create_trans(trans, 1017 1017 BCACHEFS_ROOT_SUBVOL_INUM, 1018 1018 &root_inode, &lostfound_inode, 1019 1019 &lostfound,
+18 -18
fs/bcachefs/reflink.c
··· 253 253 u64 remap_sectors, 254 254 u64 new_i_size, s64 *i_sectors_delta) 255 255 { 256 - struct btree_trans trans; 256 + struct btree_trans *trans; 257 257 struct btree_iter dst_iter, src_iter; 258 258 struct bkey_s_c src_k; 259 259 struct bkey_buf new_dst, new_src; ··· 275 275 276 276 bch2_bkey_buf_init(&new_dst); 277 277 bch2_bkey_buf_init(&new_src); 278 - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096); 278 + trans = bch2_trans_get(c); 279 279 280 - bch2_trans_iter_init(&trans, &src_iter, BTREE_ID_extents, src_start, 280 + bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start, 281 281 BTREE_ITER_INTENT); 282 - bch2_trans_iter_init(&trans, &dst_iter, BTREE_ID_extents, dst_start, 282 + bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start, 283 283 BTREE_ITER_INTENT); 284 284 285 285 while ((ret == 0 || ··· 287 287 bkey_lt(dst_iter.pos, dst_end)) { 288 288 struct disk_reservation disk_res = { 0 }; 289 289 290 - bch2_trans_begin(&trans); 290 + bch2_trans_begin(trans); 291 291 292 292 if (fatal_signal_pending(current)) { 293 293 ret = -EINTR; 294 294 break; 295 295 } 296 296 297 - ret = bch2_subvolume_get_snapshot(&trans, src_inum.subvol, 297 + ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol, 298 298 &src_snapshot); 299 299 if (ret) 300 300 continue; 301 301 302 302 bch2_btree_iter_set_snapshot(&src_iter, src_snapshot); 303 303 304 - ret = bch2_subvolume_get_snapshot(&trans, dst_inum.subvol, 304 + ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol, 305 305 &dst_snapshot); 306 306 if (ret) 307 307 continue; ··· 318 318 continue; 319 319 320 320 if (bkey_lt(src_want, src_iter.pos)) { 321 - ret = bch2_fpunch_at(&trans, &dst_iter, dst_inum, 321 + ret = bch2_fpunch_at(trans, &dst_iter, dst_inum, 322 322 min(dst_end.offset, 323 323 dst_iter.pos.offset + 324 324 src_iter.pos.offset - src_want.offset), ··· 332 332 bch2_bkey_buf_reassemble(&new_src, c, src_k); 333 333 src_k = bkey_i_to_s_c(new_src.k); 334 334 335 - ret = 
bch2_make_extent_indirect(&trans, &src_iter, 335 + ret = bch2_make_extent_indirect(trans, &src_iter, 336 336 new_src.k); 337 337 if (ret) 338 338 continue; ··· 360 360 min(src_k.k->p.offset - src_want.offset, 361 361 dst_end.offset - dst_iter.pos.offset)); 362 362 363 - ret = bch2_extent_update(&trans, dst_inum, &dst_iter, 363 + ret = bch2_extent_update(trans, dst_inum, &dst_iter, 364 364 new_dst.k, &disk_res, 365 365 new_i_size, i_sectors_delta, 366 366 true); 367 367 bch2_disk_reservation_put(c, &disk_res); 368 368 } 369 - bch2_trans_iter_exit(&trans, &dst_iter); 370 - bch2_trans_iter_exit(&trans, &src_iter); 369 + bch2_trans_iter_exit(trans, &dst_iter); 370 + bch2_trans_iter_exit(trans, &src_iter); 371 371 372 372 BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end)); 373 373 BUG_ON(bkey_gt(dst_iter.pos, dst_end)); ··· 379 379 struct bch_inode_unpacked inode_u; 380 380 struct btree_iter inode_iter = { NULL }; 381 381 382 - bch2_trans_begin(&trans); 382 + bch2_trans_begin(trans); 383 383 384 - ret2 = bch2_inode_peek(&trans, &inode_iter, &inode_u, 384 + ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u, 385 385 dst_inum, BTREE_ITER_INTENT); 386 386 387 387 if (!ret2 && 388 388 inode_u.bi_size < new_i_size) { 389 389 inode_u.bi_size = new_i_size; 390 - ret2 = bch2_inode_write(&trans, &inode_iter, &inode_u) ?: 391 - bch2_trans_commit(&trans, NULL, NULL, 390 + ret2 = bch2_inode_write(trans, &inode_iter, &inode_u) ?: 391 + bch2_trans_commit(trans, NULL, NULL, 392 392 BTREE_INSERT_NOFAIL); 393 393 } 394 394 395 - bch2_trans_iter_exit(&trans, &inode_iter); 395 + bch2_trans_iter_exit(trans, &inode_iter); 396 396 } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart)); 397 397 398 - bch2_trans_exit(&trans); 398 + bch2_trans_put(trans); 399 399 bch2_bkey_buf_exit(&new_src, c); 400 400 bch2_bkey_buf_exit(&new_dst, c); 401 401
+29 -29
fs/bcachefs/snapshot.c
··· 610 610 int ret; 611 611 612 612 ret = bch2_trans_run(c, 613 - for_each_btree_key_commit(&trans, iter, 613 + for_each_btree_key_commit(trans, iter, 614 614 BTREE_ID_snapshot_trees, POS_MIN, 615 615 BTREE_ITER_PREFETCH, k, 616 616 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 617 - check_snapshot_tree(&trans, &iter, k))); 617 + check_snapshot_tree(trans, &iter, k))); 618 618 619 619 if (ret) 620 620 bch_err(c, "error %i checking snapshot trees", ret); ··· 883 883 * the parent's depth already be correct: 884 884 */ 885 885 ret = bch2_trans_run(c, 886 - for_each_btree_key_reverse_commit(&trans, iter, 886 + for_each_btree_key_reverse_commit(trans, iter, 887 887 BTREE_ID_snapshots, POS_MAX, 888 888 BTREE_ITER_PREFETCH, k, 889 889 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 890 - check_snapshot(&trans, &iter, k))); 890 + check_snapshot(trans, &iter, k))); 891 891 if (ret) 892 892 bch_err_fn(c, ret); 893 893 return ret; ··· 1373 1373 1374 1374 int bch2_delete_dead_snapshots(struct bch_fs *c) 1375 1375 { 1376 - struct btree_trans trans; 1376 + struct btree_trans *trans; 1377 1377 struct btree_iter iter; 1378 1378 struct bkey_s_c k; 1379 1379 struct bkey_s_c_snapshot snap; ··· 1390 1390 } 1391 1391 } 1392 1392 1393 - bch2_trans_init(&trans, c, 0, 0); 1393 + trans = bch2_trans_get(c); 1394 1394 1395 1395 /* 1396 1396 * For every snapshot node: If we have no live children and it's not 1397 1397 * pointed to by a subvolume, delete it: 1398 1398 */ 1399 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots, 1399 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, 1400 1400 POS_MIN, 0, k, 1401 1401 NULL, NULL, 0, 1402 - bch2_delete_redundant_snapshot(&trans, &iter, k)); 1402 + bch2_delete_redundant_snapshot(trans, &iter, k)); 1403 1403 if (ret) { 1404 1404 bch_err_msg(c, ret, "deleting redundant snapshots"); 1405 1405 goto err; 1406 1406 } 1407 1407 1408 - for_each_btree_key2(&trans, iter, BTREE_ID_snapshots, 1408 + 
for_each_btree_key2(trans, iter, BTREE_ID_snapshots, 1409 1409 POS_MIN, 0, k, 1410 - bch2_snapshot_set_equiv(&trans, k)); 1410 + bch2_snapshot_set_equiv(trans, k)); 1411 1411 if (ret) { 1412 1412 bch_err_msg(c, ret, "in bch2_snapshots_set_equiv"); 1413 1413 goto err; 1414 1414 } 1415 1415 1416 - for_each_btree_key(&trans, iter, BTREE_ID_snapshots, 1416 + for_each_btree_key(trans, iter, BTREE_ID_snapshots, 1417 1417 POS_MIN, 0, k, ret) { 1418 1418 if (k.k->type != KEY_TYPE_snapshot) 1419 1419 continue; ··· 1425 1425 break; 1426 1426 } 1427 1427 } 1428 - bch2_trans_iter_exit(&trans, &iter); 1428 + bch2_trans_iter_exit(trans, &iter); 1429 1429 1430 1430 if (ret) { 1431 1431 bch_err_msg(c, ret, "walking snapshots"); ··· 1440 1440 if (!btree_type_has_snapshots(id)) 1441 1441 continue; 1442 1442 1443 - ret = for_each_btree_key_commit(&trans, iter, 1443 + ret = for_each_btree_key_commit(trans, iter, 1444 1444 id, POS_MIN, 1445 1445 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 1446 1446 &res, NULL, BTREE_INSERT_NOFAIL, 1447 - snapshot_delete_key(&trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?: 1448 - for_each_btree_key_commit(&trans, iter, 1447 + snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?: 1448 + for_each_btree_key_commit(trans, iter, 1449 1449 id, POS_MIN, 1450 1450 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, 1451 1451 &res, NULL, BTREE_INSERT_NOFAIL, 1452 - move_key_to_correct_snapshot(&trans, &iter, k)); 1452 + move_key_to_correct_snapshot(trans, &iter, k)); 1453 1453 1454 1454 bch2_disk_reservation_put(c, &res); 1455 1455 darray_exit(&equiv_seen); ··· 1460 1460 } 1461 1461 } 1462 1462 1463 - for_each_btree_key(&trans, iter, BTREE_ID_snapshots, 1463 + for_each_btree_key(trans, iter, BTREE_ID_snapshots, 1464 1464 POS_MIN, 0, k, ret) { 1465 1465 u32 snapshot = k.k->p.offset; 1466 1466 u32 equiv = bch2_snapshot_equiv(c, snapshot); ··· 1468 1468 if (equiv != snapshot) 1469 1469 snapshot_list_add(c, &deleted_interior, snapshot); 
1470 1470 } 1471 - bch2_trans_iter_exit(&trans, &iter); 1471 + bch2_trans_iter_exit(trans, &iter); 1472 1472 1473 1473 /* 1474 1474 * Fixing children of deleted snapshots can't be done completely 1475 1475 * atomically, if we crash between here and when we delete the interior 1476 1476 * nodes some depth fields will be off: 1477 1477 */ 1478 - ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_snapshots, POS_MIN, 1478 + ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN, 1479 1479 BTREE_ITER_INTENT, k, 1480 1480 NULL, NULL, BTREE_INSERT_NOFAIL, 1481 - bch2_fix_child_of_deleted_snapshot(&trans, &iter, k, &deleted_interior)); 1481 + bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior)); 1482 1482 if (ret) 1483 1483 goto err; 1484 1484 1485 1485 darray_for_each(deleted, i) { 1486 - ret = commit_do(&trans, NULL, NULL, 0, 1487 - bch2_snapshot_node_delete(&trans, *i)); 1486 + ret = commit_do(trans, NULL, NULL, 0, 1487 + bch2_snapshot_node_delete(trans, *i)); 1488 1488 if (ret) { 1489 1489 bch_err_msg(c, ret, "deleting snapshot %u", *i); 1490 1490 goto err; ··· 1492 1492 } 1493 1493 1494 1494 darray_for_each(deleted_interior, i) { 1495 - ret = commit_do(&trans, NULL, NULL, 0, 1496 - bch2_snapshot_node_delete(&trans, *i)); 1495 + ret = commit_do(trans, NULL, NULL, 0, 1496 + bch2_snapshot_node_delete(trans, *i)); 1497 1497 if (ret) { 1498 1498 bch_err_msg(c, ret, "deleting snapshot %u", *i); 1499 1499 goto err; ··· 1504 1504 err: 1505 1505 darray_exit(&deleted_interior); 1506 1506 darray_exit(&deleted); 1507 - bch2_trans_exit(&trans); 1507 + bch2_trans_put(trans); 1508 1508 if (ret) 1509 1509 bch_err_fn(c, ret); 1510 1510 return ret; ··· 1671 1671 int ret = 0; 1672 1672 1673 1673 ret = bch2_trans_run(c, 1674 - for_each_btree_key2(&trans, iter, BTREE_ID_snapshots, 1674 + for_each_btree_key2(trans, iter, BTREE_ID_snapshots, 1675 1675 POS_MIN, 0, k, 1676 - bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?: 1677 
- bch2_snapshot_set_equiv(&trans, k)) ?: 1678 - for_each_btree_key2(&trans, iter, BTREE_ID_snapshots, 1676 + bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?: 1677 + bch2_snapshot_set_equiv(trans, k)) ?: 1678 + for_each_btree_key2(trans, iter, BTREE_ID_snapshots, 1679 1679 POS_MIN, 0, k, 1680 1680 (set_is_ancestor_bitmap(c, k.k->p.offset), 0))); 1681 1681 if (ret)
+3 -3
fs/bcachefs/subvolume.c
··· 86 86 int ret; 87 87 88 88 ret = bch2_trans_run(c, 89 - for_each_btree_key_commit(&trans, iter, 89 + for_each_btree_key_commit(trans, iter, 90 90 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k, 91 91 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, 92 - check_subvol(&trans, &iter, k))); 92 + check_subvol(trans, &iter, k))); 93 93 if (ret) 94 94 bch_err_fn(c, ret); 95 95 return ret; ··· 293 293 bch2_evict_subvolume_inodes(c, &s); 294 294 295 295 for (id = s.data; id < s.data + s.nr; id++) { 296 - ret = bch2_trans_run(c, bch2_subvolume_delete(&trans, *id)); 296 + ret = bch2_trans_run(c, bch2_subvolume_delete(trans, *id)); 297 297 if (ret) { 298 298 bch_err_msg(c, ret, "deleting subvolume %u", *id); 299 299 break;
-7
fs/bcachefs/super.c
··· 470 470 static void __bch2_fs_free(struct bch_fs *c) 471 471 { 472 472 unsigned i; 473 - int cpu; 474 473 475 474 for (i = 0; i < BCH_TIME_STAT_NR; i++) 476 475 bch2_time_stats_exit(&c->times[i]); ··· 501 502 percpu_free_rwsem(&c->mark_lock); 502 503 free_percpu(c->online_reserved); 503 504 504 - if (c->btree_paths_bufs) 505 - for_each_possible_cpu(cpu) 506 - kfree(per_cpu_ptr(c->btree_paths_bufs, cpu)->path); 507 - 508 505 darray_exit(&c->btree_roots_extra); 509 - free_percpu(c->btree_paths_bufs); 510 506 free_percpu(c->pcpu); 511 507 mempool_exit(&c->large_bkey_pool); 512 508 mempool_exit(&c->btree_bounce_pool); ··· 823 829 BIOSET_NEED_BVECS) || 824 830 !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) || 825 831 !(c->online_reserved = alloc_percpu(u64)) || 826 - !(c->btree_paths_bufs = alloc_percpu(struct btree_path_buf)) || 827 832 mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1, 828 833 btree_bytes(c)) || 829 834 mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
+5 -5
fs/bcachefs/sysfs.c
··· 252 252 253 253 static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c) 254 254 { 255 - struct btree_trans trans; 255 + struct btree_trans *trans; 256 256 struct btree_iter iter; 257 257 struct bkey_s_c k; 258 258 enum btree_id id; ··· 268 268 if (!test_bit(BCH_FS_STARTED, &c->flags)) 269 269 return -EPERM; 270 270 271 - bch2_trans_init(&trans, c, 0, 0); 271 + trans = bch2_trans_get(c); 272 272 273 273 for (id = 0; id < BTREE_ID_NR; id++) { 274 274 if (!btree_type_has_ptrs(id)) 275 275 continue; 276 276 277 - for_each_btree_key(&trans, iter, id, POS_MIN, 277 + for_each_btree_key(trans, iter, id, POS_MIN, 278 278 BTREE_ITER_ALL_SNAPSHOTS, k, ret) { 279 279 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 280 280 const union bch_extent_entry *entry; ··· 308 308 else if (compressed) 309 309 nr_compressed_extents++; 310 310 } 311 - bch2_trans_iter_exit(&trans, &iter); 311 + bch2_trans_iter_exit(trans, &iter); 312 312 } 313 313 314 - bch2_trans_exit(&trans); 314 + bch2_trans_put(trans); 315 315 316 316 if (ret) 317 317 return ret;
+92 -113
fs/bcachefs/tests.c
··· 31 31 32 32 static int test_delete(struct bch_fs *c, u64 nr) 33 33 { 34 - struct btree_trans trans; 34 + struct btree_trans *trans = bch2_trans_get(c); 35 35 struct btree_iter iter; 36 36 struct bkey_i_cookie k; 37 37 int ret; ··· 39 39 bkey_cookie_init(&k.k_i); 40 40 k.k.p.snapshot = U32_MAX; 41 41 42 - bch2_trans_init(&trans, c, 0, 0); 43 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p, 42 + bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p, 44 43 BTREE_ITER_INTENT); 45 44 46 - ret = commit_do(&trans, NULL, NULL, 0, 45 + ret = commit_do(trans, NULL, NULL, 0, 47 46 bch2_btree_iter_traverse(&iter) ?: 48 - bch2_trans_update(&trans, &iter, &k.k_i, 0)); 47 + bch2_trans_update(trans, &iter, &k.k_i, 0)); 49 48 if (ret) { 50 49 bch_err_msg(c, ret, "update error"); 51 50 goto err; 52 51 } 53 52 54 53 pr_info("deleting once"); 55 - ret = commit_do(&trans, NULL, NULL, 0, 54 + ret = commit_do(trans, NULL, NULL, 0, 56 55 bch2_btree_iter_traverse(&iter) ?: 57 - bch2_btree_delete_at(&trans, &iter, 0)); 56 + bch2_btree_delete_at(trans, &iter, 0)); 58 57 if (ret) { 59 58 bch_err_msg(c, ret, "delete error (first)"); 60 59 goto err; 61 60 } 62 61 63 62 pr_info("deleting twice"); 64 - ret = commit_do(&trans, NULL, NULL, 0, 63 + ret = commit_do(trans, NULL, NULL, 0, 65 64 bch2_btree_iter_traverse(&iter) ?: 66 - bch2_btree_delete_at(&trans, &iter, 0)); 65 + bch2_btree_delete_at(trans, &iter, 0)); 67 66 if (ret) { 68 67 bch_err_msg(c, ret, "delete error (second)"); 69 68 goto err; 70 69 } 71 70 err: 72 - bch2_trans_iter_exit(&trans, &iter); 73 - bch2_trans_exit(&trans); 71 + bch2_trans_iter_exit(trans, &iter); 72 + bch2_trans_put(trans); 74 73 return ret; 75 74 } 76 75 77 76 static int test_delete_written(struct bch_fs *c, u64 nr) 78 77 { 79 - struct btree_trans trans; 78 + struct btree_trans *trans = bch2_trans_get(c); 80 79 struct btree_iter iter; 81 80 struct bkey_i_cookie k; 82 81 int ret; ··· 83 84 bkey_cookie_init(&k.k_i); 84 85 k.k.p.snapshot = U32_MAX; 
85 86 86 - bch2_trans_init(&trans, c, 0, 0); 87 - 88 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, k.k.p, 87 + bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p, 89 88 BTREE_ITER_INTENT); 90 89 91 - ret = commit_do(&trans, NULL, NULL, 0, 90 + ret = commit_do(trans, NULL, NULL, 0, 92 91 bch2_btree_iter_traverse(&iter) ?: 93 - bch2_trans_update(&trans, &iter, &k.k_i, 0)); 92 + bch2_trans_update(trans, &iter, &k.k_i, 0)); 94 93 if (ret) { 95 94 bch_err_msg(c, ret, "update error"); 96 95 goto err; 97 96 } 98 97 99 - bch2_trans_unlock(&trans); 98 + bch2_trans_unlock(trans); 100 99 bch2_journal_flush_all_pins(&c->journal); 101 100 102 - ret = commit_do(&trans, NULL, NULL, 0, 101 + ret = commit_do(trans, NULL, NULL, 0, 103 102 bch2_btree_iter_traverse(&iter) ?: 104 - bch2_btree_delete_at(&trans, &iter, 0)); 103 + bch2_btree_delete_at(trans, &iter, 0)); 105 104 if (ret) { 106 105 bch_err_msg(c, ret, "delete error"); 107 106 goto err; 108 107 } 109 108 err: 110 - bch2_trans_iter_exit(&trans, &iter); 111 - bch2_trans_exit(&trans); 109 + bch2_trans_iter_exit(trans, &iter); 110 + bch2_trans_put(trans); 112 111 return ret; 113 112 } 114 113 115 114 static int test_iterate(struct bch_fs *c, u64 nr) 116 115 { 117 - struct btree_trans trans; 116 + struct btree_trans *trans = bch2_trans_get(c); 118 117 struct btree_iter iter = { NULL }; 119 118 struct bkey_s_c k; 120 119 u64 i; 121 120 int ret = 0; 122 - 123 - bch2_trans_init(&trans, c, 0, 0); 124 121 125 122 delete_test_keys(c); 126 123 ··· 140 145 141 146 i = 0; 142 147 143 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs, 148 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs, 144 149 SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 145 150 0, k, ({ 146 151 BUG_ON(k.k->p.offset != i++); ··· 155 160 156 161 pr_info("iterating backwards"); 157 162 158 - ret = for_each_btree_key_reverse(&trans, iter, BTREE_ID_xattrs, 163 + ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs, 159 164 SPOS(0, 
U64_MAX, U32_MAX), 0, k, 160 165 ({ 161 166 BUG_ON(k.k->p.offset != --i); ··· 168 173 169 174 BUG_ON(i); 170 175 err: 171 - bch2_trans_iter_exit(&trans, &iter); 172 - bch2_trans_exit(&trans); 176 + bch2_trans_iter_exit(trans, &iter); 177 + bch2_trans_put(trans); 173 178 return ret; 174 179 } 175 180 176 181 static int test_iterate_extents(struct bch_fs *c, u64 nr) 177 182 { 178 - struct btree_trans trans; 183 + struct btree_trans *trans = bch2_trans_get(c); 179 184 struct btree_iter iter = { NULL }; 180 185 struct bkey_s_c k; 181 186 u64 i; 182 187 int ret = 0; 183 - 184 - bch2_trans_init(&trans, c, 0, 0); 185 188 186 189 delete_test_keys(c); 187 190 ··· 204 211 205 212 i = 0; 206 213 207 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents, 214 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents, 208 215 SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 209 216 0, k, ({ 210 217 BUG_ON(bkey_start_offset(k.k) != i); ··· 220 227 221 228 pr_info("iterating backwards"); 222 229 223 - ret = for_each_btree_key_reverse(&trans, iter, BTREE_ID_extents, 230 + ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents, 224 231 SPOS(0, U64_MAX, U32_MAX), 0, k, 225 232 ({ 226 233 BUG_ON(k.k->p.offset != i); ··· 234 241 235 242 BUG_ON(i); 236 243 err: 237 - bch2_trans_iter_exit(&trans, &iter); 238 - bch2_trans_exit(&trans); 244 + bch2_trans_iter_exit(trans, &iter); 245 + bch2_trans_put(trans); 239 246 return ret; 240 247 } 241 248 242 249 static int test_iterate_slots(struct bch_fs *c, u64 nr) 243 250 { 244 - struct btree_trans trans; 251 + struct btree_trans *trans = bch2_trans_get(c); 245 252 struct btree_iter iter = { NULL }; 246 253 struct bkey_s_c k; 247 254 u64 i; 248 255 int ret = 0; 249 - 250 - bch2_trans_init(&trans, c, 0, 0); 251 256 252 257 delete_test_keys(c); 253 258 ··· 269 278 270 279 i = 0; 271 280 272 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs, 281 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs, 273 282 SPOS(0, 0, 
U32_MAX), POS(0, U64_MAX), 274 283 0, k, ({ 275 284 BUG_ON(k.k->p.offset != i); ··· 287 296 288 297 i = 0; 289 298 290 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs, 299 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs, 291 300 SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 292 301 BTREE_ITER_SLOTS, k, ({ 293 302 if (i >= nr * 2) ··· 305 314 } 306 315 ret = 0; 307 316 err: 308 - bch2_trans_exit(&trans); 317 + bch2_trans_put(trans); 309 318 return ret; 310 319 } 311 320 312 321 static int test_iterate_slots_extents(struct bch_fs *c, u64 nr) 313 322 { 314 - struct btree_trans trans; 323 + struct btree_trans *trans = bch2_trans_get(c); 315 324 struct btree_iter iter = { NULL }; 316 325 struct bkey_s_c k; 317 326 u64 i; 318 327 int ret = 0; 319 - 320 - bch2_trans_init(&trans, c, 0, 0); 321 328 322 329 delete_test_keys(c); 323 330 ··· 340 351 341 352 i = 0; 342 353 343 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents, 354 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents, 344 355 SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 345 356 0, k, ({ 346 357 BUG_ON(bkey_start_offset(k.k) != i + 8); ··· 359 370 360 371 i = 0; 361 372 362 - ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents, 373 + ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents, 363 374 SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 364 375 BTREE_ITER_SLOTS, k, ({ 365 376 if (i == nr) ··· 377 388 } 378 389 ret = 0; 379 390 err: 380 - bch2_trans_exit(&trans); 391 + bch2_trans_put(trans); 381 392 return 0; 382 393 } 383 394 ··· 387 398 */ 388 399 static int test_peek_end(struct bch_fs *c, u64 nr) 389 400 { 390 - struct btree_trans trans; 401 + struct btree_trans *trans = bch2_trans_get(c); 391 402 struct btree_iter iter; 392 403 struct bkey_s_c k; 393 404 394 - bch2_trans_init(&trans, c, 0, 0); 395 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, 405 + bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, 396 406 SPOS(0, 0, U32_MAX), 0); 397 407 398 - 
lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 408 + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 399 409 BUG_ON(k.k); 400 410 401 - lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 411 + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 402 412 BUG_ON(k.k); 403 413 404 - bch2_trans_iter_exit(&trans, &iter); 405 - bch2_trans_exit(&trans); 414 + bch2_trans_iter_exit(trans, &iter); 415 + bch2_trans_put(trans); 406 416 return 0; 407 417 } 408 418 409 419 static int test_peek_end_extents(struct bch_fs *c, u64 nr) 410 420 { 411 - struct btree_trans trans; 421 + struct btree_trans *trans = bch2_trans_get(c); 412 422 struct btree_iter iter; 413 423 struct bkey_s_c k; 414 424 415 - bch2_trans_init(&trans, c, 0, 0); 416 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, 425 + bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, 417 426 SPOS(0, 0, U32_MAX), 0); 418 427 419 - lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 428 + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 420 429 BUG_ON(k.k); 421 430 422 - lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 431 + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 423 432 BUG_ON(k.k); 424 433 425 - bch2_trans_iter_exit(&trans, &iter); 426 - bch2_trans_exit(&trans); 434 + bch2_trans_iter_exit(trans, &iter); 435 + bch2_trans_put(trans); 427 436 return 0; 428 437 } 429 438 ··· 497 510 k.k_i.k.size = len; 498 511 499 512 ret = bch2_trans_do(c, NULL, NULL, 0, 500 - bch2_btree_insert_nonextent(&trans, BTREE_ID_extents, &k.k_i, 513 + bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i, 501 514 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)); 502 515 if (ret) 503 516 bch_err_fn(c, ret); ··· 520 533 /* Test skipping 
over keys in unrelated snapshots: */ 521 534 static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi) 522 535 { 523 - struct btree_trans trans; 536 + struct btree_trans *trans; 524 537 struct btree_iter iter; 525 538 struct bkey_s_c k; 526 539 struct bkey_i_cookie cookie; ··· 532 545 if (ret) 533 546 return ret; 534 547 535 - bch2_trans_init(&trans, c, 0, 0); 536 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, 548 + trans = bch2_trans_get(c); 549 + bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, 537 550 SPOS(0, 0, snapid_lo), 0); 538 - lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 551 + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX)))); 539 552 540 553 BUG_ON(k.k->p.snapshot != U32_MAX); 541 554 542 - bch2_trans_iter_exit(&trans, &iter); 543 - bch2_trans_exit(&trans); 555 + bch2_trans_iter_exit(trans, &iter); 556 + bch2_trans_put(trans); 544 557 return ret; 545 558 } 546 559 ··· 558 571 return ret; 559 572 560 573 ret = bch2_trans_do(c, NULL, NULL, 0, 561 - bch2_snapshot_node_create(&trans, U32_MAX, 574 + bch2_snapshot_node_create(trans, U32_MAX, 562 575 snapids, 563 576 snapid_subvols, 564 577 2)); ··· 589 602 590 603 static int rand_insert(struct bch_fs *c, u64 nr) 591 604 { 592 - struct btree_trans trans; 605 + struct btree_trans *trans = bch2_trans_get(c); 593 606 struct bkey_i_cookie k; 594 607 int ret = 0; 595 608 u64 i; 596 - 597 - bch2_trans_init(&trans, c, 0, 0); 598 609 599 610 for (i = 0; i < nr; i++) { 600 611 bkey_cookie_init(&k.k_i); 601 612 k.k.p.offset = test_rand(); 602 613 k.k.p.snapshot = U32_MAX; 603 614 604 - ret = commit_do(&trans, NULL, NULL, 0, 605 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k.k_i, 0)); 615 + ret = commit_do(trans, NULL, NULL, 0, 616 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0)); 606 617 if (ret) 607 618 break; 608 619 } 609 620 610 - bch2_trans_exit(&trans); 621 + bch2_trans_put(trans); 
611 622 return ret; 612 623 } 613 624 614 625 static int rand_insert_multi(struct bch_fs *c, u64 nr) 615 626 { 616 - struct btree_trans trans; 627 + struct btree_trans *trans = bch2_trans_get(c); 617 628 struct bkey_i_cookie k[8]; 618 629 int ret = 0; 619 630 unsigned j; 620 631 u64 i; 621 - 622 - bch2_trans_init(&trans, c, 0, 0); 623 632 624 633 for (i = 0; i < nr; i += ARRAY_SIZE(k)) { 625 634 for (j = 0; j < ARRAY_SIZE(k); j++) { ··· 624 641 k[j].k.p.snapshot = U32_MAX; 625 642 } 626 643 627 - ret = commit_do(&trans, NULL, NULL, 0, 628 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?: 629 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?: 630 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?: 631 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?: 632 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?: 633 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?: 634 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?: 635 - bch2_btree_insert_trans(&trans, BTREE_ID_xattrs, &k[7].k_i, 0)); 644 + ret = commit_do(trans, NULL, NULL, 0, 645 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?: 646 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?: 647 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?: 648 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?: 649 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?: 650 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?: 651 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?: 652 + bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0)); 636 653 if (ret) 637 654 break; 638 655 } 639 656 640 - bch2_trans_exit(&trans); 657 + bch2_trans_put(trans); 641 658 return ret; 642 659 } 643 660 644 661 static int rand_lookup(struct bch_fs *c, u64 nr) 645 662 { 646 - struct btree_trans trans; 663 + struct btree_trans 
*trans = bch2_trans_get(c); 647 664 struct btree_iter iter; 648 665 struct bkey_s_c k; 649 666 int ret = 0; 650 667 u64 i; 651 668 652 - bch2_trans_init(&trans, c, 0, 0); 653 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, 669 + bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, 654 670 SPOS(0, 0, U32_MAX), 0); 655 671 656 672 for (i = 0; i < nr; i++) { 657 673 bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX)); 658 674 659 - lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter))); 675 + lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter))); 660 676 ret = bkey_err(k); 661 677 if (ret) 662 678 break; 663 679 } 664 680 665 - bch2_trans_iter_exit(&trans, &iter); 666 - bch2_trans_exit(&trans); 681 + bch2_trans_iter_exit(trans, &iter); 682 + bch2_trans_put(trans); 667 683 return ret; 668 684 } 669 685 ··· 694 712 695 713 static int rand_mixed(struct bch_fs *c, u64 nr) 696 714 { 697 - struct btree_trans trans; 715 + struct btree_trans *trans = bch2_trans_get(c); 698 716 struct btree_iter iter; 699 717 struct bkey_i_cookie cookie; 700 718 int ret = 0; 701 719 u64 i, rand; 702 720 703 - bch2_trans_init(&trans, c, 0, 0); 704 - bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, 721 + bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, 705 722 SPOS(0, 0, U32_MAX), 0); 706 723 707 724 for (i = 0; i < nr; i++) { 708 725 rand = test_rand(); 709 - ret = commit_do(&trans, NULL, NULL, 0, 710 - rand_mixed_trans(&trans, &iter, &cookie, i, rand)); 726 + ret = commit_do(trans, NULL, NULL, 0, 727 + rand_mixed_trans(trans, &iter, &cookie, i, rand)); 711 728 if (ret) 712 729 break; 713 730 } 714 731 715 - bch2_trans_iter_exit(&trans, &iter); 716 - bch2_trans_exit(&trans); 732 + bch2_trans_iter_exit(trans, &iter); 733 + bch2_trans_put(trans); 717 734 return ret; 718 735 } 719 736 ··· 740 759 741 760 static int rand_delete(struct bch_fs *c, u64 nr) 742 761 { 743 - struct btree_trans trans; 762 + struct btree_trans *trans = bch2_trans_get(c); 744 763 
int ret = 0; 745 764 u64 i; 746 - 747 - bch2_trans_init(&trans, c, 0, 0); 748 765 749 766 for (i = 0; i < nr; i++) { 750 767 struct bpos pos = SPOS(0, test_rand(), U32_MAX); 751 768 752 - ret = commit_do(&trans, NULL, NULL, 0, 753 - __do_delete(&trans, pos)); 769 + ret = commit_do(trans, NULL, NULL, 0, 770 + __do_delete(trans, pos)); 754 771 if (ret) 755 772 break; 756 773 } 757 774 758 - bch2_trans_exit(&trans); 775 + bch2_trans_put(trans); 759 776 return ret; 760 777 } 761 778 ··· 766 787 bkey_cookie_init(&insert.k_i); 767 788 768 789 return bch2_trans_run(c, 769 - for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs, 790 + for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, 770 791 SPOS(0, 0, U32_MAX), 771 792 BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, 772 793 NULL, NULL, 0, ({ 773 794 if (iter.pos.offset >= nr) 774 795 break; 775 796 insert.k.p = iter.pos; 776 - bch2_trans_update(&trans, &iter, &insert.k_i, 0); 797 + bch2_trans_update(trans, &iter, &insert.k_i, 0); 777 798 }))); 778 799 } 779 800 ··· 783 804 struct bkey_s_c k; 784 805 785 806 return bch2_trans_run(c, 786 - for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs, 807 + for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs, 787 808 SPOS(0, 0, U32_MAX), POS(0, U64_MAX), 788 809 0, k, 789 810 0)); ··· 795 816 struct bkey_s_c k; 796 817 797 818 return bch2_trans_run(c, 798 - for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs, 819 + for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs, 799 820 SPOS(0, 0, U32_MAX), 800 821 BTREE_ITER_INTENT, k, 801 822 NULL, NULL, 0, ({ 802 823 struct bkey_i_cookie u; 803 824 804 825 bkey_reassemble(&u.k_i, k); 805 - bch2_trans_update(&trans, &iter, &u.k_i, 0); 826 + bch2_trans_update(trans, &iter, &u.k_i, 0); 806 827 }))); 807 828 } 808 829
+12 -18
fs/bcachefs/xattr.c
··· 307 307 { 308 308 struct bch_fs *c = dentry->d_sb->s_fs_info; 309 309 struct bch_inode_info *inode = to_bch_ei(dentry->d_inode); 310 - struct btree_trans trans; 310 + struct btree_trans *trans = bch2_trans_get(c); 311 311 struct btree_iter iter; 312 312 struct bkey_s_c k; 313 313 struct xattr_buf buf = { .buf = buffer, .len = buffer_size }; 314 314 u64 offset = 0, inum = inode->ei_inode.bi_inum; 315 315 u32 snapshot; 316 316 int ret; 317 - 318 - bch2_trans_init(&trans, c, 0, 0); 319 317 retry: 320 - bch2_trans_begin(&trans); 318 + bch2_trans_begin(trans); 321 319 iter = (struct btree_iter) { NULL }; 322 320 323 - ret = bch2_subvolume_get_snapshot(&trans, inode->ei_subvol, &snapshot); 321 + ret = bch2_subvolume_get_snapshot(trans, inode->ei_subvol, &snapshot); 324 322 if (ret) 325 323 goto err; 326 324 327 - for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_xattrs, 325 + for_each_btree_key_upto_norestart(trans, iter, BTREE_ID_xattrs, 328 326 SPOS(inum, offset, snapshot), 329 327 POS(inum, U64_MAX), 0, k, ret) { 330 328 if (k.k->type != KEY_TYPE_xattr) ··· 334 336 } 335 337 336 338 offset = iter.pos.offset; 337 - bch2_trans_iter_exit(&trans, &iter); 339 + bch2_trans_iter_exit(trans, &iter); 338 340 err: 339 341 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) 340 342 goto retry; 341 343 342 - bch2_trans_exit(&trans); 344 + bch2_trans_put(trans); 343 345 344 346 if (ret) 345 347 goto out; ··· 364 366 struct bch_inode_info *inode = to_bch_ei(vinode); 365 367 struct bch_fs *c = inode->v.i_sb->s_fs_info; 366 368 int ret = bch2_trans_do(c, NULL, NULL, 0, 367 - bch2_xattr_get_trans(&trans, inode, name, buffer, size, handler->flags)); 369 + bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags)); 368 370 369 371 return bch2_err_class(ret); 370 372 } ··· 379 381 struct bch_fs *c = inode->v.i_sb->s_fs_info; 380 382 struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode); 381 383 struct bch_inode_unpacked inode_u; 382 - struct 
btree_trans trans; 383 384 int ret; 384 385 385 - bch2_trans_init(&trans, c, 0, 0); 386 - 387 - ret = commit_do(&trans, NULL, NULL, 0, 388 - bch2_xattr_set(&trans, inode_inum(inode), &inode_u, 386 + ret = bch2_trans_run(c, 387 + commit_do(trans, NULL, NULL, 0, 388 + bch2_xattr_set(trans, inode_inum(inode), &inode_u, 389 389 &hash, name, value, size, 390 - handler->flags, flags)); 391 - if (!ret) 392 - bch2_inode_update_after_write(&trans, inode, &inode_u, ATTR_CTIME); 393 - bch2_trans_exit(&trans); 390 + handler->flags, flags)) ?: 391 + (bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0)); 394 392 395 393 return bch2_err_class(ret); 396 394 }