Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: Don't run bch2_delete_dead_snapshots() unnecessarily

Be a bit more careful about when bch2_delete_dead_snapshots needs to
run: it only needs to run synchronously if we're running fsck, and it
only needs to run at all if we have snapshot nodes to delete or if fsck
has noticed that it needs to run.

Also:
Rename BCH_FS_HAVE_DELETED_SNAPSHOTS -> BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS

Kill bch2_delete_dead_snapshots_hook(), move functionality to
bch2_mark_snapshot()

Factor out bch2_check_snapshot_needs_deletion(), to explicitly check
if we need to be running snapshot deletion.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+51 -54
+1 -1
fs/bcachefs/bcachefs.h
··· 578 578 BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */ 579 579 BCH_FS_NEED_ANOTHER_GC, 580 580 581 - BCH_FS_HAVE_DELETED_SNAPSHOTS, 581 + BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, 582 582 583 583 /* errors: */ 584 584 BCH_FS_ERROR,
+1
fs/bcachefs/fsck.c
··· 447 447 bch2_btree_ids[btree_id], 448 448 pos.inode, pos.offset, 449 449 i->id, n.id, n.equiv); 450 + set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); 450 451 return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots); 451 452 } 452 453 }
+1 -1
fs/bcachefs/recovery.c
··· 901 901 } 902 902 kfree(clean); 903 903 904 - if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) { 904 + if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) { 905 905 bch2_fs_read_write_early(c); 906 906 bch2_delete_dead_snapshots_async(c); 907 907 }
+1 -1
fs/bcachefs/recovery_types.h
··· 27 27 x(check_snapshot_trees, PASS_FSCK) \ 28 28 x(check_snapshots, PASS_FSCK) \ 29 29 x(check_subvols, PASS_FSCK) \ 30 - x(delete_dead_snapshots, PASS_FSCK|PASS_UNCLEAN) \ 30 + x(delete_dead_snapshots, PASS_FSCK) \ 31 31 x(fs_upgrade_for_subvolumes, 0) \ 32 32 x(resume_logged_ops, PASS_ALWAYS) \ 33 33 x(check_inodes, PASS_FSCK) \
+45 -32
fs/bcachefs/snapshot.c
··· 325 325 __set_is_ancestor_bitmap(c, id); 326 326 327 327 if (BCH_SNAPSHOT_DELETED(s.v)) { 328 - set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags); 329 - c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_delete_dead_snapshots); 328 + set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); 329 + if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots) 330 + bch2_delete_dead_snapshots_async(c); 330 331 } 331 332 } else { 332 333 memset(t, 0, sizeof(*t)); ··· 1252 1251 return 0; 1253 1252 } 1254 1253 1255 - /* 1256 - * For a given snapshot, if it doesn't have a subvolume that points to it, and 1257 - * it doesn't have child snapshot nodes - it's now redundant and we can mark it 1258 - * as deleted. 1259 - */ 1260 - static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter, 1261 - struct bkey_s_c k) 1254 + static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k) 1262 1255 { 1263 1256 struct bkey_s_c_snapshot snap; 1264 1257 u32 children[2]; ··· 1273 1278 bch2_snapshot_live(trans, children[1]); 1274 1279 if (ret < 0) 1275 1280 return ret; 1281 + return !ret; 1282 + } 1276 1283 1277 - if (!ret) 1278 - return bch2_snapshot_node_set_deleted(trans, k.k->p.offset); 1279 - return 0; 1284 + /* 1285 + * For a given snapshot, if it doesn't have a subvolume that points to it, and 1286 + * it doesn't have child snapshot nodes - it's now redundant and we can mark it 1287 + * as deleted. 1288 + */ 1289 + static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k) 1290 + { 1291 + int ret = bch2_snapshot_needs_delete(trans, k); 1292 + 1293 + return ret <= 0 1294 + ? ret 1295 + : bch2_snapshot_node_set_deleted(trans, k.k->p.offset); 1280 1296 } 1281 1297 1282 1298 static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n, ··· 1375 1369 u32 *i, id; 1376 1370 int ret = 0; 1377 1371 1372 + if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) 1373 + return 0; 1374 + 1378 1375 if (!test_bit(BCH_FS_STARTED, &c->flags)) { 1379 1376 ret = bch2_fs_read_write_early(c); 1380 1377 if (ret) { ··· 1395 1386 ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, 1396 1387 POS_MIN, 0, k, 1397 1388 NULL, NULL, 0, 1398 - bch2_delete_redundant_snapshot(trans, &iter, k)); 1389 + bch2_delete_redundant_snapshot(trans, k)); 1399 1390 if (ret) { 1400 1391 bch_err_msg(c, ret, "deleting redundant snapshots"); 1401 1392 goto err; ··· 1501 1492 goto err_create_lock; 1502 1493 } 1503 1494 } 1504 - 1505 - clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags); 1506 1495 err_create_lock: 1507 1496 up_write(&c->snapshot_create_lock); 1508 1497 err: ··· 1516 1509 { 1517 1510 struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work); 1518 1511 1519 - if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) 1520 - bch2_delete_dead_snapshots(c); 1512 + bch2_delete_dead_snapshots(c); 1521 1513 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots); 1522 1514 } 1523 1515 ··· 1525 1519 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_delete_dead_snapshots) && 1526 1520 !queue_work(c->write_ref_wq, &c->snapshot_delete_work)) 1527 1521 bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots); 1528 - } 1529 - 1530 - int bch2_delete_dead_snapshots_hook(struct btree_trans *trans, 1531 - struct btree_trans_commit_hook *h) 1532 - { 1533 - struct bch_fs *c = trans->c; 1534 - 1535 - set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags); 1536 - 1537 - if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_delete_dead_snapshots) 1538 - return 0; 1539 - 1540 - bch2_delete_dead_snapshots_async(c); 1541 - return 0; 1542 1522 } 1543 1523 1544 1524 int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans, ··· 1657 1665 return ret ?: trans_was_restarted(trans, restart_count); 1658 1666 } 1659 1667 1668 + static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k) 1669 + { 1670 + struct bch_fs *c = trans->c; 1671 + struct bkey_s_c_snapshot snap; 1672 + int ret = 0; 1673 + 1674 + if (k.k->type != KEY_TYPE_snapshot) 1675 + return 0; 1676 + 1677 + snap = bkey_s_c_to_snapshot(k); 1678 + if (BCH_SNAPSHOT_DELETED(snap.v) || 1679 + bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset || 1680 + (ret = bch2_snapshot_needs_delete(trans, k)) > 0) { 1681 + set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags); 1682 + return 0; 1683 + } 1684 + 1685 + return ret; 1686 + } 1687 + 1660 1688 int bch2_snapshots_read(struct bch_fs *c) 1661 1689 { 1662 1690 struct btree_iter iter; ··· 1687 1675 for_each_btree_key2(trans, iter, BTREE_ID_snapshots, 1688 1676 POS_MIN, 0, k, 1689 1677 bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?: 1690 - bch2_snapshot_set_equiv(trans, k)) ?: 1678 + bch2_snapshot_set_equiv(trans, k) ?: 1679 + bch2_check_snapshot_needs_deletion(trans, k)) ?: 1691 1680 for_each_btree_key2(trans, iter, BTREE_ID_snapshots, 1692 1681 POS_MIN, 0, k, 1693 1682 (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
-2
fs/bcachefs/snapshot.h
··· 244 244 int bch2_check_snapshots(struct bch_fs *); 245 245 246 246 int bch2_snapshot_node_set_deleted(struct btree_trans *, u32); 247 - int bch2_delete_dead_snapshots_hook(struct btree_trans *, 248 - struct btree_trans_commit_hook *); 249 247 void bch2_delete_dead_snapshots_work(struct work_struct *); 250 248 251 249 int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);
+2 -17
fs/bcachefs/subvolume.c
··· 230 230 { 231 231 struct btree_iter iter; 232 232 struct bkey_s_c_subvolume subvol; 233 - struct btree_trans_commit_hook *h; 234 233 u32 snapid; 235 234 int ret = 0; 236 235 ··· 245 246 246 247 snapid = le32_to_cpu(subvol.v->snapshot); 247 248 248 - ret = bch2_btree_delete_at(trans, &iter, 0); 249 - if (ret) 250 - goto err; 251 - 252 - ret = bch2_snapshot_node_set_deleted(trans, snapid); 253 - if (ret) 254 - goto err; 255 - 256 - h = bch2_trans_kmalloc(trans, sizeof(*h)); 257 - ret = PTR_ERR_OR_ZERO(h); 258 - if (ret) 259 - goto err; 260 - 261 - h->fn = bch2_delete_dead_snapshots_hook; 262 - bch2_trans_commit_hook(trans, h); 263 - err: 249 + ret = bch2_btree_delete_at(trans, &iter, 0) ?: 250 + bch2_snapshot_node_set_deleted(trans, snapid); 264 251 bch2_trans_iter_exit(trans, &iter); 265 252 return ret; 266 253 }