Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: Enumerate fsck errors

This patch adds a superblock error counter for every distinct fsck
error; this means that when analyzing filesystems out in the wild we'll
be able to see what sorts of inconsistencies are being found and
repaired, and hence what bugs to look for.

Errors validating bkeys are not yet considered distinct fsck errors, but
this patch adds a new helper, bkey_fsck_err(), in order to add distinct
error types for them as well.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+1165 -728
+83 -73
fs/bcachefs/alloc_background.c
··· 192 192 return DIV_ROUND_UP(bytes, sizeof(u64)); 193 193 } 194 194 195 - int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k, 195 + int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k, 196 196 enum bkey_invalid_flags flags, 197 197 struct printbuf *err) 198 198 { 199 199 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); 200 + int ret = 0; 200 201 201 202 /* allow for unknown fields */ 202 - if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) { 203 - prt_printf(err, "incorrect value size (%zu < %u)", 204 - bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v)); 205 - return -BCH_ERR_invalid_bkey; 206 - } 207 - 208 - return 0; 203 + bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err, 204 + alloc_v1_val_size_bad, 205 + "incorrect value size (%zu < %u)", 206 + bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v)); 207 + fsck_err: 208 + return ret; 209 209 } 210 210 211 - int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k, 211 + int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k, 212 212 enum bkey_invalid_flags flags, 213 213 struct printbuf *err) 214 214 { 215 215 struct bkey_alloc_unpacked u; 216 + int ret = 0; 216 217 217 - if (bch2_alloc_unpack_v2(&u, k)) { 218 - prt_printf(err, "unpack error"); 219 - return -BCH_ERR_invalid_bkey; 220 - } 221 - 222 - return 0; 218 + bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err, 219 + alloc_v2_unpack_error, 220 + "unpack error"); 221 + fsck_err: 222 + return ret; 223 223 } 224 224 225 - int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k, 225 + int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k, 226 226 enum bkey_invalid_flags flags, 227 227 struct printbuf *err) 228 228 { 229 229 struct bkey_alloc_unpacked u; 230 + int ret = 0; 230 231 231 - if (bch2_alloc_unpack_v3(&u, k)) { 232 - prt_printf(err, "unpack error"); 233 - return -BCH_ERR_invalid_bkey; 234 - } 235 - 236 - return 0; 232 + bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, 
err, 233 + alloc_v2_unpack_error, 234 + "unpack error"); 235 + fsck_err: 236 + return ret; 237 237 } 238 238 239 - int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k, 239 + int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k, 240 240 enum bkey_invalid_flags flags, struct printbuf *err) 241 241 { 242 242 struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k); 243 + int ret = 0; 243 244 244 - if (alloc_v4_u64s(a.v) > bkey_val_u64s(k.k)) { 245 - prt_printf(err, "bad val size (%u > %zu)", 246 - alloc_v4_u64s(a.v), bkey_val_u64s(k.k)); 247 - return -BCH_ERR_invalid_bkey; 248 - } 245 + bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err, 246 + alloc_v4_val_size_bad, 247 + "bad val size (%u > %zu)", 248 + alloc_v4_u64s(a.v), bkey_val_u64s(k.k)); 249 249 250 - if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) && 251 - BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) { 252 - prt_printf(err, "invalid backpointers_start"); 253 - return -BCH_ERR_invalid_bkey; 254 - } 250 + bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) && 251 + BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err, 252 + alloc_v4_backpointers_start_bad, 253 + "invalid backpointers_start"); 255 254 256 - if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) { 257 - prt_printf(err, "invalid data type (got %u should be %u)", 258 - a.v->data_type, alloc_data_type(*a.v, a.v->data_type)); 259 - return -BCH_ERR_invalid_bkey; 260 - } 255 + bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err, 256 + alloc_key_data_type_bad, 257 + "invalid data type (got %u should be %u)", 258 + a.v->data_type, alloc_data_type(*a.v, a.v->data_type)); 261 259 262 260 switch (a.v->data_type) { 263 261 case BCH_DATA_free: 264 262 case BCH_DATA_need_gc_gens: 265 263 case BCH_DATA_need_discard: 266 - if (a.v->dirty_sectors || 267 - a.v->cached_sectors || 268 - a.v->stripe) { 269 - prt_printf(err, "empty data type free but have data"); 270 - return -BCH_ERR_invalid_bkey; 271 - } 264 + 
bkey_fsck_err_on(a.v->dirty_sectors || 265 + a.v->cached_sectors || 266 + a.v->stripe, c, err, 267 + alloc_key_empty_but_have_data, 268 + "empty data type free but have data"); 272 269 break; 273 270 case BCH_DATA_sb: 274 271 case BCH_DATA_journal: 275 272 case BCH_DATA_btree: 276 273 case BCH_DATA_user: 277 274 case BCH_DATA_parity: 278 - if (!a.v->dirty_sectors) { 279 - prt_printf(err, "data_type %s but dirty_sectors==0", 280 - bch2_data_types[a.v->data_type]); 281 - return -BCH_ERR_invalid_bkey; 282 - } 275 + bkey_fsck_err_on(!a.v->dirty_sectors, c, err, 276 + alloc_key_dirty_sectors_0, 277 + "data_type %s but dirty_sectors==0", 278 + bch2_data_types[a.v->data_type]); 283 279 break; 284 280 case BCH_DATA_cached: 285 - if (!a.v->cached_sectors || 286 - a.v->dirty_sectors || 287 - a.v->stripe) { 288 - prt_printf(err, "data type inconsistency"); 289 - return -BCH_ERR_invalid_bkey; 290 - } 281 + bkey_fsck_err_on(!a.v->cached_sectors || 282 + a.v->dirty_sectors || 283 + a.v->stripe, c, err, 284 + alloc_key_cached_inconsistency, 285 + "data type inconsistency"); 291 286 292 - if (!a.v->io_time[READ] && 293 - c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs) { 294 - prt_printf(err, "cached bucket with read_time == 0"); 295 - return -BCH_ERR_invalid_bkey; 296 - } 287 + bkey_fsck_err_on(!a.v->io_time[READ] && 288 + c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs, 289 + c, err, 290 + alloc_key_cached_but_read_time_zero, 291 + "cached bucket with read_time == 0"); 297 292 break; 298 293 case BCH_DATA_stripe: 299 294 break; 300 295 } 301 - 302 - return 0; 296 + fsck_err: 297 + return ret; 303 298 } 304 299 305 300 static inline u64 swab40(u64 x) ··· 516 521 : 0; 517 522 } 518 523 519 - int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k, 524 + int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k, 520 525 enum bkey_invalid_flags flags, 521 526 struct printbuf *err) 522 527 { 523 - if (bkey_val_bytes(k.k) != 
sizeof(struct bch_bucket_gens)) { 524 - prt_printf(err, "bad val size (%zu != %zu)", 525 - bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens)); 526 - return -BCH_ERR_invalid_bkey; 527 - } 528 + int ret = 0; 528 529 529 - return 0; 530 + bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err, 531 + bucket_gens_val_size_bad, 532 + "bad val size (%zu != %zu)", 533 + bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens)); 534 + fsck_err: 535 + return ret; 530 536 } 531 537 532 538 void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) ··· 982 986 int ret; 983 987 984 988 if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c, 989 + alloc_key_to_missing_dev_bucket, 985 990 "alloc key for invalid device:bucket %llu:%llu", 986 991 alloc_k.k->p.inode, alloc_k.k->p.offset)) 987 992 return bch2_btree_delete_at(trans, alloc_iter, 0); ··· 1002 1005 1003 1006 if (k.k->type != discard_key_type && 1004 1007 (c->opts.reconstruct_alloc || 1005 - fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n" 1008 + fsck_err(c, need_discard_key_wrong, 1009 + "incorrect key in need_discard btree (got %s should be %s)\n" 1006 1010 " %s", 1007 1011 bch2_bkey_types[k.k->type], 1008 1012 bch2_bkey_types[discard_key_type], ··· 1033 1035 1034 1036 if (k.k->type != freespace_key_type && 1035 1037 (c->opts.reconstruct_alloc || 1036 - fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n" 1038 + fsck_err(c, freespace_key_wrong, 1039 + "incorrect key in freespace btree (got %s should be %s)\n" 1037 1040 " %s", 1038 1041 bch2_bkey_types[k.k->type], 1039 1042 bch2_bkey_types[freespace_key_type], ··· 1065 1066 1066 1067 if (a->gen != alloc_gen(k, gens_offset) && 1067 1068 (c->opts.reconstruct_alloc || 1068 - fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n" 1069 + fsck_err(c, bucket_gens_key_wrong, 1070 + "incorrect gen in bucket_gens btree (got %u should be %u)\n" 1069 1071 " %s", 1070 
1072 alloc_gen(k, gens_offset), a->gen, 1071 1073 (printbuf_reset(&buf), ··· 1124 1124 1125 1125 if (k.k->type != KEY_TYPE_set && 1126 1126 (c->opts.reconstruct_alloc || 1127 - fsck_err(c, "hole in alloc btree missing in freespace btree\n" 1127 + fsck_err(c, freespace_hole_missing, 1128 + "hole in alloc btree missing in freespace btree\n" 1128 1129 " device %llu buckets %llu-%llu", 1129 1130 freespace_iter->pos.inode, 1130 1131 freespace_iter->pos.offset, ··· 1188 1187 1189 1188 for (i = gens_offset; i < gens_end_offset; i++) { 1190 1189 if (fsck_err_on(g.v.gens[i], c, 1190 + bucket_gens_hole_wrong, 1191 1191 "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)", 1192 1192 bucket_gens_pos_to_alloc(k.k->p, i).inode, 1193 1193 bucket_gens_pos_to_alloc(k.k->p, i).offset, ··· 1246 1244 return ret; 1247 1245 1248 1246 if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c, 1247 + need_discard_freespace_key_to_invalid_dev_bucket, 1249 1248 "entry in %s btree for nonexistant dev:bucket %llu:%llu", 1250 1249 bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset)) 1251 1250 goto delete; ··· 1256 1253 if (fsck_err_on(a->data_type != state || 1257 1254 (state == BCH_DATA_free && 1258 1255 genbits != alloc_freespace_genbits(*a)), c, 1256 + need_discard_freespace_key_bad, 1259 1257 "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)", 1260 1258 (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf), 1261 1259 bch2_btree_id_str(iter->btree_id), ··· 1324 1320 dev_exists = bch2_dev_exists2(c, k.k->p.inode); 1325 1321 if (!dev_exists) { 1326 1322 if (fsck_err_on(!dev_exists, c, 1323 + bucket_gens_to_invalid_dev, 1327 1324 "bucket_gens key for invalid device:\n %s", 1328 1325 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 1329 1326 ret = bch2_btree_delete_at(trans, iter, 0); ··· 1335 1330 ca = bch_dev_bkey_exists(c, k.k->p.inode); 1336 1331 if (fsck_err_on(end <= ca->mi.first_bucket || 1337 1332 start >= ca->mi.nbuckets, c, 1333 + 
bucket_gens_to_invalid_buckets, 1338 1334 "bucket_gens key for invalid buckets:\n %s", 1339 1335 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 1340 1336 ret = bch2_btree_delete_at(trans, iter, 0); ··· 1344 1338 1345 1339 for (b = start; b < ca->mi.first_bucket; b++) 1346 1340 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c, 1341 + bucket_gens_nonzero_for_invalid_buckets, 1347 1342 "bucket_gens key has nonzero gen for invalid bucket")) { 1348 1343 g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0; 1349 1344 need_update = true; ··· 1352 1345 1353 1346 for (b = ca->mi.nbuckets; b < end; b++) 1354 1347 if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c, 1348 + bucket_gens_nonzero_for_invalid_buckets, 1355 1349 "bucket_gens key has nonzero gen for invalid bucket")) { 1356 1350 g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0; 1357 1351 need_update = true; ··· 1503 1495 return ret; 1504 1496 1505 1497 if (fsck_err_on(!a->io_time[READ], c, 1498 + alloc_key_cached_but_read_time_zero, 1506 1499 "cached bucket with read_time 0\n" 1507 1500 " %s", 1508 1501 (printbuf_reset(&buf), 1509 1502 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) || 1510 1503 fsck_err_on(lru_k.k->type != KEY_TYPE_set, c, 1504 + alloc_key_to_missing_lru_entry, 1511 1505 "missing lru entry\n" 1512 1506 " %s", 1513 1507 (printbuf_reset(&buf),
+5 -5
fs/bcachefs/alloc_background.h
··· 149 149 150 150 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int); 151 151 152 - int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c, 152 + int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c, 153 153 enum bkey_invalid_flags, struct printbuf *); 154 - int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c, 154 + int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c, 155 155 enum bkey_invalid_flags, struct printbuf *); 156 - int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c, 156 + int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c, 157 157 enum bkey_invalid_flags, struct printbuf *); 158 - int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c, 158 + int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c, 159 159 enum bkey_invalid_flags, struct printbuf *); 160 160 void bch2_alloc_v4_swab(struct bkey_s); 161 161 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); ··· 193 193 .min_val_size = 48, \ 194 194 }) 195 195 196 - int bch2_bucket_gens_invalid(const struct bch_fs *, struct bkey_s_c, 196 + int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c, 197 197 enum bkey_invalid_flags, struct printbuf *); 198 198 void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 199 199
+12 -8
fs/bcachefs/backpointers.c
··· 37 37 return false; 38 38 } 39 39 40 - int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k, 40 + int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k, 41 41 enum bkey_invalid_flags flags, 42 42 struct printbuf *err) 43 43 { 44 44 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); 45 45 struct bpos bucket = bp_pos_to_bucket(c, bp.k->p); 46 + int ret = 0; 46 47 47 - if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) { 48 - prt_str(err, "backpointer at wrong pos"); 49 - return -BCH_ERR_invalid_bkey; 50 - } 51 - 52 - return 0; 48 + bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)), 49 + c, err, 50 + backpointer_pos_wrong, 51 + "backpointer at wrong pos"); 52 + fsck_err: 53 + return ret; 53 54 } 54 55 55 56 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp) ··· 357 356 int ret = 0; 358 357 359 358 if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c, 359 + backpointer_to_missing_device, 360 360 "backpointer for missing device:\n%s", 361 361 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 362 362 ret = bch2_btree_delete_at(trans, bp_iter, 0); ··· 371 369 goto out; 372 370 373 371 if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c, 372 + backpointer_to_missing_alloc, 374 373 "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", 375 374 alloc_iter.pos.inode, alloc_iter.pos.offset, 376 375 (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { ··· 463 460 464 461 if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers || 465 462 c->opts.reconstruct_alloc || 466 - fsck_err(c, "%s", buf.buf)) 463 + fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf)) 467 464 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true); 468 465 469 466 goto out; ··· 796 793 } 797 794 798 795 if (fsck_err_on(!k.k, c, 796 + backpointer_to_missing_ptr, 799 797 "backpointer for missing extent\n %s", 800 798 
(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) { 801 799 ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+1 -1
fs/bcachefs/backpointers.h
··· 7 7 #include "buckets.h" 8 8 #include "super.h" 9 9 10 - int bch2_backpointer_invalid(const struct bch_fs *, struct bkey_s_c k, 10 + int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k, 11 11 enum bkey_invalid_flags, struct printbuf *); 12 12 void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *); 13 13 void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
+64 -79
fs/bcachefs/bkey_methods.c
··· 26 26 NULL 27 27 }; 28 28 29 - static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k, 29 + static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k, 30 30 enum bkey_invalid_flags flags, struct printbuf *err) 31 31 { 32 32 return 0; ··· 40 40 .key_invalid = deleted_key_invalid, \ 41 41 }) 42 42 43 - static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k, 43 + static int empty_val_key_invalid(struct bch_fs *c, struct bkey_s_c k, 44 44 enum bkey_invalid_flags flags, struct printbuf *err) 45 45 { 46 - if (bkey_val_bytes(k.k)) { 47 - prt_printf(err, "incorrect value size (%zu != 0)", 48 - bkey_val_bytes(k.k)); 49 - return -BCH_ERR_invalid_bkey; 50 - } 46 + int ret = 0; 51 47 52 - return 0; 48 + bkey_fsck_err_on(bkey_val_bytes(k.k), c, err, 49 + bkey_val_size_nonzero, 50 + "incorrect value size (%zu != 0)", 51 + bkey_val_bytes(k.k)); 52 + fsck_err: 53 + return ret; 53 54 } 54 55 55 56 #define bch2_bkey_ops_error ((struct bkey_ops) { \ 56 57 .key_invalid = empty_val_key_invalid, \ 57 58 }) 58 59 59 - static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k, 60 + static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k, 60 61 enum bkey_invalid_flags flags, struct printbuf *err) 61 62 { 62 63 return 0; ··· 72 71 .key_invalid = empty_val_key_invalid, \ 73 72 }) 74 73 75 - static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k, 74 + static int key_type_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k, 76 75 enum bkey_invalid_flags flags, struct printbuf *err) 77 76 { 78 77 return 0; ··· 93 92 .val_to_text = key_type_inline_data_to_text, \ 94 93 }) 95 94 96 - static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k, 97 - enum bkey_invalid_flags flags, struct printbuf *err) 98 - { 99 - if (bkey_val_bytes(k.k)) { 100 - prt_printf(err, "incorrect value size (%zu != %zu)", 101 - bkey_val_bytes(k.k), sizeof(struct bch_cookie)); 102 - return 
-BCH_ERR_invalid_bkey; 103 - } 104 - 105 - return 0; 106 - } 107 - 108 95 static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r) 109 96 { 110 97 bch2_key_resize(l.k, l.k->size + r.k->size); ··· 100 111 } 101 112 102 113 #define bch2_bkey_ops_set ((struct bkey_ops) { \ 103 - .key_invalid = key_type_set_invalid, \ 114 + .key_invalid = empty_val_key_invalid, \ 104 115 .key_merge = key_type_set_merge, \ 105 116 }) 106 117 ··· 118 129 struct printbuf *err) 119 130 { 120 131 const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type); 132 + int ret = 0; 121 133 122 - if (bkey_val_bytes(k.k) < ops->min_val_size) { 123 - prt_printf(err, "bad val size (%zu < %u)", 124 - bkey_val_bytes(k.k), ops->min_val_size); 125 - return -BCH_ERR_invalid_bkey; 126 - } 134 + bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size, c, err, 135 + bkey_val_size_too_small, 136 + "bad val size (%zu < %u)", 137 + bkey_val_bytes(k.k), ops->min_val_size); 127 138 128 139 if (!ops->key_invalid) 129 140 return 0; 130 141 131 - return ops->key_invalid(c, k, flags, err); 142 + ret = ops->key_invalid(c, k, flags, err); 143 + fsck_err: 144 + return ret; 132 145 } 133 146 134 147 static u64 bch2_key_types_allowed[] = { ··· 153 162 enum bkey_invalid_flags flags, 154 163 struct printbuf *err) 155 164 { 156 - if (k.k->u64s < BKEY_U64s) { 157 - prt_printf(err, "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s); 158 - return -BCH_ERR_invalid_bkey; 159 - } 165 + int ret = 0; 166 + 167 + bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, err, 168 + bkey_u64s_too_small, 169 + "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s); 160 170 161 171 if (type >= BKEY_TYPE_NR) 162 172 return 0; 163 173 164 - if (flags & BKEY_INVALID_COMMIT && 165 - !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) { 166 - prt_printf(err, "invalid key type for btree %s (%s)", 167 - bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]); 168 - return -BCH_ERR_invalid_bkey; 169 - } 174 + 
bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) && 175 + !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err, 176 + bkey_invalid_type_for_btree, 177 + "invalid key type for btree %s (%s)", 178 + bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]); 170 179 171 180 if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) { 172 - if (k.k->size == 0) { 173 - prt_printf(err, "size == 0"); 174 - return -BCH_ERR_invalid_bkey; 175 - } 181 + bkey_fsck_err_on(k.k->size == 0, c, err, 182 + bkey_extent_size_zero, 183 + "size == 0"); 176 184 177 - if (k.k->size > k.k->p.offset) { 178 - prt_printf(err, "size greater than offset (%u > %llu)", 179 - k.k->size, k.k->p.offset); 180 - return -BCH_ERR_invalid_bkey; 181 - } 185 + bkey_fsck_err_on(k.k->size > k.k->p.offset, c, err, 186 + bkey_extent_size_greater_than_offset, 187 + "size greater than offset (%u > %llu)", 188 + k.k->size, k.k->p.offset); 182 189 } else { 183 - if (k.k->size) { 184 - prt_printf(err, "size != 0"); 185 - return -BCH_ERR_invalid_bkey; 186 - } 190 + bkey_fsck_err_on(k.k->size, c, err, 191 + bkey_size_nonzero, 192 + "size != 0"); 187 193 } 188 194 189 195 if (type != BKEY_TYPE_btree) { 190 196 enum btree_id btree = type - 1; 191 197 192 - if (!btree_type_has_snapshots(btree) && 193 - k.k->p.snapshot) { 194 - prt_printf(err, "nonzero snapshot"); 195 - return -BCH_ERR_invalid_bkey; 196 - } 198 + bkey_fsck_err_on(!btree_type_has_snapshots(btree) && 199 + k.k->p.snapshot, c, err, 200 + bkey_snapshot_nonzero, 201 + "nonzero snapshot"); 197 202 198 - if (btree_type_has_snapshots(btree) && 199 - !k.k->p.snapshot) { 200 - prt_printf(err, "snapshot == 0"); 201 - return -BCH_ERR_invalid_bkey; 202 - } 203 + bkey_fsck_err_on(btree_type_has_snapshots(btree) && 204 + !k.k->p.snapshot, c, err, 205 + bkey_snapshot_zero, 206 + "snapshot == 0"); 203 207 204 - if (bkey_eq(k.k->p, POS_MAX)) { 205 - prt_printf(err, "key at POS_MAX"); 206 - return -BCH_ERR_invalid_bkey; 207 - } 208 + 
bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX), c, err, 209 + bkey_at_pos_max, 210 + "key at POS_MAX"); 208 211 } 209 - 210 - return 0; 212 + fsck_err: 213 + return ret; 211 214 } 212 215 213 216 int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k, ··· 213 228 bch2_bkey_val_invalid(c, k, flags, err); 214 229 } 215 230 216 - int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k, 217 - struct printbuf *err) 231 + int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b, 232 + struct bkey_s_c k, struct printbuf *err) 218 233 { 219 - if (bpos_lt(k.k->p, b->data->min_key)) { 220 - prt_printf(err, "key before start of btree node"); 221 - return -BCH_ERR_invalid_bkey; 222 - } 234 + int ret = 0; 223 235 224 - if (bpos_gt(k.k->p, b->data->max_key)) { 225 - prt_printf(err, "key past end of btree node"); 226 - return -BCH_ERR_invalid_bkey; 227 - } 236 + bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key), c, err, 237 + bkey_before_start_of_btree_node, 238 + "key before start of btree node"); 228 239 229 - return 0; 240 + bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key), c, err, 241 + bkey_after_end_of_btree_node, 242 + "key past end of btree node"); 243 + fsck_err: 244 + return ret; 230 245 } 231 246 232 247 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
+3 -2
fs/bcachefs/bkey_methods.h
··· 21 21 * being read or written; more aggressive checks can be enabled when rw == WRITE. 22 22 */ 23 23 struct bkey_ops { 24 - int (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k, 24 + int (*key_invalid)(struct bch_fs *c, struct bkey_s_c k, 25 25 enum bkey_invalid_flags flags, struct printbuf *err); 26 26 void (*val_to_text)(struct printbuf *, struct bch_fs *, 27 27 struct bkey_s_c); ··· 55 55 enum bkey_invalid_flags, struct printbuf *); 56 56 int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type, 57 57 enum bkey_invalid_flags, struct printbuf *); 58 - int bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c, struct printbuf *); 58 + int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *, 59 + struct bkey_s_c, struct printbuf *); 59 60 60 61 void bch2_bpos_to_text(struct printbuf *, struct bpos); 61 62 void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
+80 -44
fs/bcachefs/btree_gc.c
··· 95 95 bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k)); 96 96 97 97 if (__fsck_err(c, 98 - FSCK_CAN_FIX| 99 - FSCK_CAN_IGNORE| 100 - FSCK_NO_RATELIMIT, 101 - "btree node with incorrect min_key at btree %s level %u:\n" 102 - " prev %s\n" 103 - " cur %s", 104 - bch2_btree_id_str(b->c.btree_id), b->c.level, 105 - buf1.buf, buf2.buf) && 106 - should_restart_for_topology_repair(c)) { 98 + FSCK_CAN_FIX| 99 + FSCK_CAN_IGNORE| 100 + FSCK_NO_RATELIMIT, 101 + btree_node_topology_bad_min_key, 102 + "btree node with incorrect min_key at btree %s level %u:\n" 103 + " prev %s\n" 104 + " cur %s", 105 + bch2_btree_id_str(b->c.btree_id), b->c.level, 106 + buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) { 107 107 bch_info(c, "Halting mark and sweep to start topology repair pass"); 108 108 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology); 109 109 goto err; ··· 122 122 bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k)); 123 123 bch2_bpos_to_text(&buf2, node_end); 124 124 125 - if (__fsck_err(c, 126 - FSCK_CAN_FIX| 127 - FSCK_CAN_IGNORE| 128 - FSCK_NO_RATELIMIT, 125 + if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT, 126 + btree_node_topology_bad_max_key, 129 127 "btree node with incorrect max_key at btree %s level %u:\n" 130 128 " %s\n" 131 129 " expected %s", ··· 285 287 286 288 if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key, 287 289 cur->data->min_key), c, 290 + btree_node_topology_overwritten_by_next_node, 288 291 "btree node overwritten by next node at btree %s level %u:\n" 289 292 " node %s\n" 290 293 " next %s", ··· 297 298 298 299 if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p, 299 300 bpos_predecessor(cur->data->min_key)), c, 301 + btree_node_topology_bad_max_key, 300 302 "btree node with incorrect max_key at btree %s level %u:\n" 301 303 " node %s\n" 302 304 " next %s", ··· 310 310 311 311 if (mustfix_fsck_err_on(bpos_ge(expected_start, 312 312 cur->data->max_key), c, 313 + 
btree_node_topology_overwritten_by_prev_node, 313 314 "btree node overwritten by prev node at btree %s level %u:\n" 314 315 " prev %s\n" 315 316 " node %s", ··· 321 320 } 322 321 323 322 if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c, 323 + btree_node_topology_bad_min_key, 324 324 "btree node with incorrect min_key at btree %s level %u:\n" 325 325 " prev %s\n" 326 326 " node %s", ··· 346 344 bch2_bpos_to_text(&buf2, b->key.k.p); 347 345 348 346 if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c, 347 + btree_node_topology_bad_max_key, 349 348 "btree node with incorrect max_key at btree %s level %u:\n" 350 349 " %s\n" 351 350 " expected %s", ··· 399 396 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k)); 400 397 401 398 if (mustfix_fsck_err_on(ret == -EIO, c, 399 + btree_node_unreadable, 402 400 "Topology repair: unreadable btree node at btree %s level %u:\n" 403 401 " %s", 404 402 bch2_btree_id_str(b->c.btree_id), ··· 508 504 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); 509 505 510 506 if (mustfix_fsck_err_on(!have_child, c, 507 + btree_node_topology_interior_node_empty, 511 508 "empty interior btree node at btree %s level %u\n" 512 509 " %s", 513 510 bch2_btree_id_str(b->c.btree_id), ··· 587 582 588 583 if (!g->gen_valid && 589 584 (c->opts.reconstruct_alloc || 590 - fsck_err(c, "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" 585 + fsck_err(c, ptr_to_missing_alloc_key, 586 + "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n" 591 587 "while marking %s", 592 588 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 593 589 bch2_data_types[ptr_data_type(k->k, &p.ptr)], ··· 605 599 606 600 if (gen_cmp(p.ptr.gen, g->gen) > 0 && 607 601 (c->opts.reconstruct_alloc || 608 - fsck_err(c, "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" 602 + fsck_err(c, ptr_gen_newer_than_bucket_gen, 603 + "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n" 609 604 "while marking %s", 610 605 
p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 611 606 bch2_data_types[ptr_data_type(k->k, &p.ptr)], ··· 627 620 628 621 if (gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX && 629 622 (c->opts.reconstruct_alloc || 630 - fsck_err(c, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 623 + fsck_err(c, ptr_gen_newer_than_bucket_gen, 624 + "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 631 625 "while marking %s", 632 626 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen, 633 627 bch2_data_types[ptr_data_type(k->k, &p.ptr)], ··· 639 631 640 632 if (!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0 && 641 633 (c->opts.reconstruct_alloc || 642 - fsck_err(c, "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" 634 + fsck_err(c, stale_dirty_ptr, 635 + "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n" 643 636 "while marking %s", 644 637 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), 645 638 bch2_data_types[ptr_data_type(k->k, &p.ptr)], ··· 654 645 655 646 if (fsck_err_on(bucket_data_type(g->data_type) && 656 647 bucket_data_type(g->data_type) != data_type, c, 648 + ptr_bucket_data_type_mismatch, 657 649 "bucket %u:%zu different types of data in same bucket: %s, %s\n" 658 650 "while marking %s", 659 651 p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), ··· 674 664 struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx); 675 665 676 666 if (fsck_err_on(!m || !m->alive, c, 667 + ptr_to_missing_stripe, 677 668 "pointer to nonexistent stripe %llu\n" 678 669 "while marking %s", 679 670 (u64) p.ec.idx, ··· 683 672 do_update = true; 684 673 685 674 if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c, 675 + ptr_to_incorrect_stripe, 686 676 "pointer does not match stripe %llu\n" 687 677 "while marking %s", 688 678 (u64) p.ec.idx, ··· 823 811 goto err; 824 812 825 813 if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c, 814 + bkey_version_in_future, 826 815 "key version number higher than recorded: %llu > %llu", 827 816 k->k->version.lo, 828 817 
atomic64_read(&c->key_version))) ··· 981 968 FSCK_CAN_FIX| 982 969 FSCK_CAN_IGNORE| 983 970 FSCK_NO_RATELIMIT, 971 + btree_node_read_error, 984 972 "Unreadable btree node at btree %s level %u:\n" 985 973 " %s", 986 974 bch2_btree_id_str(b->c.btree_id), ··· 1039 1025 printbuf_reset(&buf); 1040 1026 bch2_bpos_to_text(&buf, b->data->min_key); 1041 1027 if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c, 1028 + btree_root_bad_min_key, 1042 1029 "btree root with incorrect min_key: %s", buf.buf)) { 1043 1030 bch_err(c, "repair unimplemented"); 1044 1031 ret = -BCH_ERR_fsck_repair_unimplemented; ··· 1049 1034 printbuf_reset(&buf); 1050 1035 bch2_bpos_to_text(&buf, b->data->max_key); 1051 1036 if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c, 1037 + btree_root_bad_max_key, 1052 1038 "btree root with incorrect max_key: %s", buf.buf)) { 1053 1039 bch_err(c, "repair unimplemented"); 1054 1040 ret = -BCH_ERR_fsck_repair_unimplemented; ··· 1223 1207 1224 1208 percpu_down_write(&c->mark_lock); 1225 1209 1226 - #define copy_field(_f, _msg, ...) \ 1210 + #define copy_field(_err, _f, _msg, ...) \ 1227 1211 if (dst->_f != src->_f && \ 1228 1212 (!verify || \ 1229 - fsck_err(c, _msg ": got %llu, should be %llu" \ 1213 + fsck_err(c, _err, _msg ": got %llu, should be %llu" \ 1230 1214 , ##__VA_ARGS__, dst->_f, src->_f))) \ 1231 1215 dst->_f = src->_f 1232 - #define copy_dev_field(_f, _msg, ...) \ 1233 - copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__) 1234 - #define copy_fs_field(_f, _msg, ...) \ 1235 - copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__) 1216 + #define copy_dev_field(_err, _f, _msg, ...) \ 1217 + copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__) 1218 + #define copy_fs_field(_err, _f, _msg, ...) 
\ 1219 + copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__) 1236 1220 1237 1221 for (i = 0; i < ARRAY_SIZE(c->usage); i++) 1238 1222 bch2_fs_usage_acc_to_base(c, i); ··· 1243 1227 bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc, 1244 1228 dev_usage_u64s()); 1245 1229 1246 - copy_dev_field(buckets_ec, "buckets_ec"); 1247 - 1248 1230 for (i = 0; i < BCH_DATA_NR; i++) { 1249 - copy_dev_field(d[i].buckets, "%s buckets", bch2_data_types[i]); 1250 - copy_dev_field(d[i].sectors, "%s sectors", bch2_data_types[i]); 1251 - copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]); 1231 + copy_dev_field(dev_usage_buckets_wrong, 1232 + d[i].buckets, "%s buckets", bch2_data_types[i]); 1233 + copy_dev_field(dev_usage_sectors_wrong, 1234 + d[i].sectors, "%s sectors", bch2_data_types[i]); 1235 + copy_dev_field(dev_usage_fragmented_wrong, 1236 + d[i].fragmented, "%s fragmented", bch2_data_types[i]); 1252 1237 } 1238 + 1239 + copy_dev_field(dev_usage_buckets_ec_wrong, 1240 + buckets_ec, "buckets_ec"); 1253 1241 } 1254 1242 1255 1243 { ··· 1262 1242 struct bch_fs_usage *src = (void *) 1263 1243 bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr); 1264 1244 1265 - copy_fs_field(hidden, "hidden"); 1266 - copy_fs_field(btree, "btree"); 1245 + copy_fs_field(fs_usage_hidden_wrong, 1246 + hidden, "hidden"); 1247 + copy_fs_field(fs_usage_btree_wrong, 1248 + btree, "btree"); 1267 1249 1268 1250 if (!metadata_only) { 1269 - copy_fs_field(data, "data"); 1270 - copy_fs_field(cached, "cached"); 1271 - copy_fs_field(reserved, "reserved"); 1272 - copy_fs_field(nr_inodes,"nr_inodes"); 1251 + copy_fs_field(fs_usage_data_wrong, 1252 + data, "data"); 1253 + copy_fs_field(fs_usage_cached_wrong, 1254 + cached, "cached"); 1255 + copy_fs_field(fs_usage_reserved_wrong, 1256 + reserved, "reserved"); 1257 + copy_fs_field(fs_usage_nr_inodes_wrong, 1258 + nr_inodes,"nr_inodes"); 1273 1259 1274 1260 for (i = 0; i < BCH_REPLICAS_MAX; i++) 1275 - copy_fs_field(persistent_reserved[i], 1261 
+ copy_fs_field(fs_usage_persistent_reserved_wrong, 1262 + persistent_reserved[i], 1276 1263 "persistent_reserved[%i]", i); 1277 1264 } 1278 1265 ··· 1295 1268 printbuf_reset(&buf); 1296 1269 bch2_replicas_entry_to_text(&buf, e); 1297 1270 1298 - copy_fs_field(replicas[i], "%s", buf.buf); 1271 + copy_fs_field(fs_usage_replicas_wrong, 1272 + replicas[i], "%s", buf.buf); 1299 1273 } 1300 1274 } 1301 1275 ··· 1432 1404 1433 1405 if (c->opts.reconstruct_alloc || 1434 1406 fsck_err_on(new.data_type != gc.data_type, c, 1407 + alloc_key_data_type_wrong, 1435 1408 "bucket %llu:%llu gen %u has wrong data_type" 1436 1409 ": got %s, should be %s", 1437 1410 iter->pos.inode, iter->pos.offset, ··· 1441 1412 bch2_data_types[gc.data_type])) 1442 1413 new.data_type = gc.data_type; 1443 1414 1444 - #define copy_bucket_field(_f) \ 1415 + #define copy_bucket_field(_errtype, _f) \ 1445 1416 if (c->opts.reconstruct_alloc || \ 1446 - fsck_err_on(new._f != gc._f, c, \ 1417 + fsck_err_on(new._f != gc._f, c, _errtype, \ 1447 1418 "bucket %llu:%llu gen %u data type %s has wrong " #_f \ 1448 1419 ": got %u, should be %u", \ 1449 1420 iter->pos.inode, iter->pos.offset, \ ··· 1452 1423 new._f, gc._f)) \ 1453 1424 new._f = gc._f; \ 1454 1425 1455 - copy_bucket_field(gen); 1456 - copy_bucket_field(dirty_sectors); 1457 - copy_bucket_field(cached_sectors); 1458 - copy_bucket_field(stripe_redundancy); 1459 - copy_bucket_field(stripe); 1426 + copy_bucket_field(alloc_key_gen_wrong, 1427 + gen); 1428 + copy_bucket_field(alloc_key_dirty_sectors_wrong, 1429 + dirty_sectors); 1430 + copy_bucket_field(alloc_key_cached_sectors_wrong, 1431 + cached_sectors); 1432 + copy_bucket_field(alloc_key_stripe_wrong, 1433 + stripe); 1434 + copy_bucket_field(alloc_key_stripe_redundancy_wrong, 1435 + stripe_redundancy); 1460 1436 #undef copy_bucket_field 1461 1437 1462 1438 if (!bch2_alloc_v4_cmp(*old, new)) ··· 1618 1584 } 1619 1585 1620 1586 if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c, 1587 + 
reflink_v_refcount_wrong, 1621 1588 "reflink key has wrong refcount:\n" 1622 1589 " %s\n" 1623 1590 " should be %u", ··· 1744 1709 if (bad) 1745 1710 bch2_bkey_val_to_text(&buf, c, k); 1746 1711 1747 - if (fsck_err_on(bad, c, "%s", buf.buf)) { 1712 + if (fsck_err_on(bad, c, stripe_sector_count_wrong, 1713 + "%s", buf.buf)) { 1748 1714 struct bkey_i_stripe *new; 1749 1715 1750 1716 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+125 -49
fs/bcachefs/btree_io.c
··· 530 530 prt_str(out, ": "); 531 531 } 532 532 533 - __printf(8, 9) 533 + __printf(9, 10) 534 534 static int __btree_err(int ret, 535 535 struct bch_fs *c, 536 536 struct bch_dev *ca, ··· 538 538 struct bset *i, 539 539 int write, 540 540 bool have_retry, 541 + enum bch_sb_error_id err_type, 541 542 const char *fmt, ...) 542 543 { 543 544 struct printbuf out = PRINTBUF; ··· 563 562 if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry) 564 563 ret = -BCH_ERR_btree_node_read_err_bad_node; 565 564 565 + if (ret != -BCH_ERR_btree_node_read_err_fixable) 566 + bch2_sb_error_count(c, err_type); 567 + 566 568 switch (ret) { 567 569 case -BCH_ERR_btree_node_read_err_fixable: 568 - mustfix_fsck_err(c, "%s", out.buf); 570 + ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf); 571 + if (ret != -BCH_ERR_fsck_fix && 572 + ret != -BCH_ERR_fsck_ignore) 573 + goto fsck_err; 569 574 ret = -BCH_ERR_fsck_fix; 570 575 break; 571 576 case -BCH_ERR_btree_node_read_err_want_retry: ··· 596 589 return ret; 597 590 } 598 591 599 - #define btree_err(type, c, ca, b, i, msg, ...) \ 592 + #define btree_err(type, c, ca, b, i, _err_type, msg, ...) 
\ 600 593 ({ \ 601 - int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\ 594 + int _ret = __btree_err(type, c, ca, b, i, write, have_retry, \ 595 + BCH_FSCK_ERR_##_err_type, \ 596 + msg, ##__VA_ARGS__); \ 602 597 \ 603 598 if (_ret != -BCH_ERR_fsck_fix) { \ 604 599 ret = _ret; \ ··· 675 666 int ret = 0; 676 667 677 668 btree_err_on(!bch2_version_compatible(version), 678 - -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i, 669 + -BCH_ERR_btree_node_read_err_incompatible, 670 + c, ca, b, i, 671 + btree_node_unsupported_version, 679 672 "unsupported bset version %u.%u", 680 673 BCH_VERSION_MAJOR(version), 681 674 BCH_VERSION_MINOR(version)); 682 675 683 676 if (btree_err_on(version < c->sb.version_min, 684 - -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, 677 + -BCH_ERR_btree_node_read_err_fixable, 678 + c, NULL, b, i, 679 + btree_node_bset_older_than_sb_min, 685 680 "bset version %u older than superblock version_min %u", 686 681 version, c->sb.version_min)) { 687 682 mutex_lock(&c->sb_lock); ··· 696 683 697 684 if (btree_err_on(BCH_VERSION_MAJOR(version) > 698 685 BCH_VERSION_MAJOR(c->sb.version), 699 - -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, 686 + -BCH_ERR_btree_node_read_err_fixable, 687 + c, NULL, b, i, 688 + btree_node_bset_newer_than_sb, 700 689 "bset version %u newer than superblock version %u", 701 690 version, c->sb.version)) { 702 691 mutex_lock(&c->sb_lock); ··· 708 693 } 709 694 710 695 btree_err_on(BSET_SEPARATE_WHITEOUTS(i), 711 - -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i, 696 + -BCH_ERR_btree_node_read_err_incompatible, 697 + c, ca, b, i, 698 + btree_node_unsupported_version, 712 699 "BSET_SEPARATE_WHITEOUTS no longer supported"); 713 700 714 701 if (btree_err_on(offset + sectors > btree_sectors(c), 715 - -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i, 702 + -BCH_ERR_btree_node_read_err_fixable, 703 + c, ca, b, i, 704 + bset_past_end_of_btree_node, 716 705 "bset past end of btree 
node")) { 717 706 i->u64s = 0; 718 707 ret = 0; ··· 724 705 } 725 706 726 707 btree_err_on(offset && !i->u64s, 727 - -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i, 708 + -BCH_ERR_btree_node_read_err_fixable, 709 + c, ca, b, i, 710 + bset_empty, 728 711 "empty bset"); 729 712 730 - btree_err_on(BSET_OFFSET(i) && 731 - BSET_OFFSET(i) != offset, 732 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i, 713 + btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset, 714 + -BCH_ERR_btree_node_read_err_want_retry, 715 + c, ca, b, i, 716 + bset_wrong_sector_offset, 733 717 "bset at wrong sector offset"); 734 718 735 719 if (!offset) { ··· 746 724 747 725 /* XXX endianness */ 748 726 btree_err_on(bp->seq != bn->keys.seq, 749 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, 727 + -BCH_ERR_btree_node_read_err_must_retry, 728 + c, ca, b, NULL, 729 + bset_bad_seq, 750 730 "incorrect sequence number (wrong btree node)"); 751 731 } 752 732 753 733 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id, 754 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i, 734 + -BCH_ERR_btree_node_read_err_must_retry, 735 + c, ca, b, i, 736 + btree_node_bad_btree, 755 737 "incorrect btree id"); 756 738 757 739 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level, 758 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i, 740 + -BCH_ERR_btree_node_read_err_must_retry, 741 + c, ca, b, i, 742 + btree_node_bad_level, 759 743 "incorrect level"); 760 744 761 745 if (!write) ··· 778 750 } 779 751 780 752 btree_err_on(!bpos_eq(b->data->min_key, bp->min_key), 781 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, 753 + -BCH_ERR_btree_node_read_err_must_retry, 754 + c, ca, b, NULL, 755 + btree_node_bad_min_key, 782 756 "incorrect min_key: got %s should be %s", 783 757 (printbuf_reset(&buf1), 784 758 bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf), ··· 789 759 } 790 760 791 761 btree_err_on(!bpos_eq(bn->max_key, b->key.k.p), 792 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i, 762 
+ -BCH_ERR_btree_node_read_err_must_retry, 763 + c, ca, b, i, 764 + btree_node_bad_max_key, 793 765 "incorrect max key %s", 794 766 (printbuf_reset(&buf1), 795 767 bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf)); ··· 801 769 BSET_BIG_ENDIAN(i), write, bn); 802 770 803 771 btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1), 804 - -BCH_ERR_btree_node_read_err_bad_node, c, ca, b, i, 772 + -BCH_ERR_btree_node_read_err_bad_node, 773 + c, ca, b, i, 774 + btree_node_bad_format, 805 775 "invalid bkey format: %s\n %s", buf1.buf, 806 776 (printbuf_reset(&buf2), 807 777 bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf)); ··· 826 792 struct printbuf *err) 827 793 { 828 794 return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?: 829 - (!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?: 795 + (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?: 830 796 (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0); 831 797 } 832 798 ··· 847 813 struct bkey tmp; 848 814 849 815 if (btree_err_on(bkey_p_next(k) > vstruct_last(i), 850 - -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, 816 + -BCH_ERR_btree_node_read_err_fixable, 817 + c, NULL, b, i, 818 + btree_node_bkey_past_bset_end, 851 819 "key extends past end of bset")) { 852 820 i->u64s = cpu_to_le16((u64 *) k - i->_data); 853 821 break; 854 822 } 855 823 856 824 if (btree_err_on(k->format > KEY_FORMAT_CURRENT, 857 - -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, 825 + -BCH_ERR_btree_node_read_err_fixable, 826 + c, NULL, b, i, 827 + btree_node_bkey_bad_format, 858 828 "invalid bkey format %u", k->format)) { 859 829 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); 860 830 memmove_u64s_down(k, bkey_p_next(k), ··· 877 839 printbuf_reset(&buf); 878 840 if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) { 879 841 printbuf_reset(&buf); 880 - prt_printf(&buf, "invalid bkey: "); 881 842 bset_key_invalid(c, b, u.s_c, updated_range, write, &buf); 882 843 
prt_printf(&buf, "\n "); 883 844 bch2_bkey_val_to_text(&buf, c, u.s_c); 884 845 885 - btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf); 846 + btree_err(-BCH_ERR_btree_node_read_err_fixable, 847 + c, NULL, b, i, 848 + btree_node_bad_bkey, 849 + "invalid bkey: %s", buf.buf); 886 850 887 851 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); 888 852 memmove_u64s_down(k, bkey_p_next(k), ··· 908 868 909 869 bch2_dump_bset(c, b, i, 0); 910 870 911 - if (btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf)) { 871 + if (btree_err(-BCH_ERR_btree_node_read_err_fixable, 872 + c, NULL, b, i, 873 + btree_node_bkey_out_of_order, 874 + "%s", buf.buf)) { 912 875 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); 913 876 memmove_u64s_down(k, bkey_p_next(k), 914 877 (u64 *) vstruct_end(i) - (u64 *) k); ··· 952 909 sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2); 953 910 954 911 if (bch2_meta_read_fault("btree")) 955 - btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, 912 + btree_err(-BCH_ERR_btree_node_read_err_must_retry, 913 + c, ca, b, NULL, 914 + btree_node_fault_injected, 956 915 "dynamic fault"); 957 916 958 917 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c), 959 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, 918 + -BCH_ERR_btree_node_read_err_must_retry, 919 + c, ca, b, NULL, 920 + btree_node_bad_magic, 960 921 "bad magic: want %llx, got %llx", 961 922 bset_magic(c), le64_to_cpu(b->data->magic)); 962 - 963 - btree_err_on(!b->data->keys.seq, 964 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, 965 - "bad btree header: seq 0"); 966 923 967 924 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) { 968 925 struct bch_btree_ptr_v2 *bp = 969 926 &bkey_i_to_btree_ptr_v2(&b->key)->v; 970 927 971 928 btree_err_on(b->data->keys.seq != bp->seq, 972 - -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL, 929 + -BCH_ERR_btree_node_read_err_must_retry, 930 + c, ca, b, NULL, 931 + 
btree_node_bad_seq, 973 932 "got wrong btree node (seq %llx want %llx)", 974 933 b->data->keys.seq, bp->seq); 934 + } else { 935 + btree_err_on(!b->data->keys.seq, 936 + -BCH_ERR_btree_node_read_err_must_retry, 937 + c, ca, b, NULL, 938 + btree_node_bad_seq, 939 + "bad btree header: seq 0"); 975 940 } 976 941 977 942 while (b->written < (ptr_written ?: btree_sectors(c))) { ··· 992 941 i = &b->data->keys; 993 942 994 943 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)), 995 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i, 996 - "unknown checksum type %llu", 997 - BSET_CSUM_TYPE(i)); 944 + -BCH_ERR_btree_node_read_err_want_retry, 945 + c, ca, b, i, 946 + bset_unknown_csum, 947 + "unknown checksum type %llu", BSET_CSUM_TYPE(i)); 998 948 999 949 nonce = btree_nonce(i, b->written << 9); 1000 950 ··· 1005 953 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); 1006 954 1007 955 btree_err_on(csum_bad, 1008 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i, 956 + -BCH_ERR_btree_node_read_err_want_retry, 957 + c, ca, b, i, 958 + bset_bad_csum, 1009 959 "invalid checksum"); 1010 960 1011 961 ret = bset_encrypt(c, i, b->written << 9); ··· 1017 963 1018 964 btree_err_on(btree_node_type_is_extents(btree_node_type(b)) && 1019 965 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data), 1020 - -BCH_ERR_btree_node_read_err_incompatible, c, NULL, b, NULL, 966 + -BCH_ERR_btree_node_read_err_incompatible, 967 + c, NULL, b, NULL, 968 + btree_node_unsupported_version, 1021 969 "btree node does not have NEW_EXTENT_OVERWRITE set"); 1022 970 1023 971 sectors = vstruct_sectors(b->data, c->block_bits); ··· 1031 975 break; 1032 976 1033 977 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)), 1034 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i, 1035 - "unknown checksum type %llu", 1036 - BSET_CSUM_TYPE(i)); 978 + -BCH_ERR_btree_node_read_err_want_retry, 979 + c, ca, b, i, 980 + bset_unknown_csum, 981 + "unknown checksum type %llu", BSET_CSUM_TYPE(i)); 1037 982 1038 983 
nonce = btree_nonce(i, b->written << 9); 1039 984 csum_bad = bch2_crc_cmp(bne->csum, ··· 1043 986 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum); 1044 987 1045 988 btree_err_on(csum_bad, 1046 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i, 989 + -BCH_ERR_btree_node_read_err_want_retry, 990 + c, ca, b, i, 991 + bset_bad_csum, 1047 992 "invalid checksum"); 1048 993 1049 994 ret = bset_encrypt(c, i, b->written << 9); ··· 1078 1019 true); 1079 1020 1080 1021 btree_err_on(blacklisted && first, 1081 - -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i, 1022 + -BCH_ERR_btree_node_read_err_fixable, 1023 + c, ca, b, i, 1024 + bset_blacklisted_journal_seq, 1082 1025 "first btree node bset has blacklisted journal seq (%llu)", 1083 1026 le64_to_cpu(i->journal_seq)); 1084 1027 1085 1028 btree_err_on(blacklisted && ptr_written, 1086 - -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i, 1029 + -BCH_ERR_btree_node_read_err_fixable, 1030 + c, ca, b, i, 1031 + first_bset_blacklisted_journal_seq, 1087 1032 "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u", 1088 1033 le64_to_cpu(i->journal_seq), 1089 1034 b->written, b->written + sectors, ptr_written); ··· 1104 1041 1105 1042 if (ptr_written) { 1106 1043 btree_err_on(b->written < ptr_written, 1107 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL, 1044 + -BCH_ERR_btree_node_read_err_want_retry, 1045 + c, ca, b, NULL, 1046 + btree_node_data_missing, 1108 1047 "btree node data missing: expected %u sectors, found %u", 1109 1048 ptr_written, b->written); 1110 1049 } else { ··· 1117 1052 !bch2_journal_seq_is_blacklisted(c, 1118 1053 le64_to_cpu(bne->keys.journal_seq), 1119 1054 true), 1120 - -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL, 1055 + -BCH_ERR_btree_node_read_err_want_retry, 1056 + c, ca, b, NULL, 1057 + btree_node_bset_after_end, 1121 1058 "found bset signature after last bset"); 1122 1059 } 1123 1060 ··· 1161 1094 prt_printf(&buf, "\n "); 1162 1095 bch2_bkey_val_to_text(&buf, c, 
u.s_c); 1163 1096 1164 - btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf); 1097 + btree_err(-BCH_ERR_btree_node_read_err_fixable, 1098 + c, NULL, b, i, 1099 + btree_node_bad_bkey, 1100 + "%s", buf.buf); 1165 1101 1166 1102 btree_keys_account_key_drop(&b->nr, 0, k); 1167 1103 ··· 1390 1320 } 1391 1321 1392 1322 written2 = btree_node_sectors_written(c, ra->buf[i]); 1393 - if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL, 1323 + if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, 1324 + c, NULL, b, NULL, 1325 + btree_node_replicas_sectors_written_mismatch, 1394 1326 "btree node sectors written mismatch: %u != %u", 1395 1327 written, written2) || 1396 1328 btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]), 1397 - -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL, 1329 + -BCH_ERR_btree_node_read_err_fixable, 1330 + c, NULL, b, NULL, 1331 + btree_node_bset_after_end, 1398 1332 "found bset signature after last bset") || 1399 1333 btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9), 1400 - -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL, 1334 + -BCH_ERR_btree_node_read_err_fixable, 1335 + c, NULL, b, NULL, 1336 + btree_node_replicas_data_mismatch, 1401 1337 "btree node replicas content mismatch")) 1402 1338 dump_bset_maps = true; 1403 1339
+2 -2
fs/bcachefs/btree_update_interior.c
··· 1274 1274 1275 1275 if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert), 1276 1276 btree_node_type(b), WRITE, &buf) ?: 1277 - bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf)) { 1277 + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) { 1278 1278 printbuf_reset(&buf); 1279 1279 prt_printf(&buf, "inserting invalid bkey\n "); 1280 1280 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert)); 1281 1281 prt_printf(&buf, "\n "); 1282 1282 bch2_bkey_invalid(c, bkey_i_to_s_c(insert), 1283 1283 btree_node_type(b), WRITE, &buf); 1284 - bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf); 1284 + bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf); 1285 1285 1286 1286 bch2_fs_inconsistent(c, "%s", buf.buf); 1287 1287 dump_stack();
+10 -3
fs/bcachefs/buckets.c
··· 370 370 371 371 idx = bch2_replicas_entry_idx(c, r); 372 372 if (idx < 0 && 373 - fsck_err(c, "no replicas entry\n" 374 - " while marking %s", 373 + fsck_err(c, ptr_to_missing_replicas_entry, 374 + "no replicas entry\n while marking %s", 375 375 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 376 376 percpu_up_read(&c->mark_lock); 377 377 ret = bch2_mark_replicas(c, r); ··· 695 695 696 696 if (gen_after(ptr->gen, b_gen)) { 697 697 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 698 + BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, 698 699 "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n" 699 700 "while marking %s", 700 701 ptr->dev, bucket_nr, b_gen, ··· 708 707 709 708 if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) { 710 709 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 710 + BCH_FSCK_ERR_ptr_too_stale, 711 711 "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n" 712 712 "while marking %s", 713 713 ptr->dev, bucket_nr, b_gen, ··· 722 720 723 721 if (b_gen != ptr->gen && !ptr->cached) { 724 722 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 723 + BCH_FSCK_ERR_stale_dirty_ptr, 725 724 "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n" 726 725 "while marking %s", 727 726 ptr->dev, bucket_nr, b_gen, ··· 744 741 ptr_data_type && 745 742 bucket_data_type != ptr_data_type) { 746 743 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 744 + BCH_FSCK_ERR_ptr_bucket_data_type_mismatch, 747 745 "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n" 748 746 "while marking %s", 749 747 ptr->dev, bucket_nr, b_gen, ··· 758 754 759 755 if ((u64) bucket_sectors + sectors > U32_MAX) { 760 756 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 757 + BCH_FSCK_ERR_bucket_sector_count_overflow, 761 758 "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n" 762 759 "while marking %s", 763 760 ptr->dev, bucket_nr, b_gen, ··· 1200 1195 *idx = r->offset; 1201 1196 return 0; 1202 1197 
not_found: 1203 - if (fsck_err(c, "pointer to missing indirect extent\n" 1198 + if (fsck_err(c, reflink_p_to_missing_reflink_v, 1199 + "pointer to missing indirect extent\n" 1204 1200 " %s\n" 1205 1201 " missing range %llu-%llu", 1206 1202 (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf), ··· 1863 1857 1864 1858 if (a->v.data_type && type && a->v.data_type != type) { 1865 1859 bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, 1860 + BCH_FSCK_ERR_bucket_metadata_type_mismatch, 1866 1861 "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n" 1867 1862 "while marking %s", 1868 1863 iter.pos.inode, iter.pos.offset, a->v.gen,
+29 -39
fs/bcachefs/dirent.c
··· 97 97 .is_visible = dirent_is_visible, 98 98 }; 99 99 100 - int bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k, 100 + int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k, 101 101 enum bkey_invalid_flags flags, 102 102 struct printbuf *err) 103 103 { 104 104 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k); 105 105 struct qstr d_name = bch2_dirent_get_name(d); 106 + int ret = 0; 106 107 107 - if (!d_name.len) { 108 - prt_printf(err, "empty name"); 109 - return -BCH_ERR_invalid_bkey; 110 - } 108 + bkey_fsck_err_on(!d_name.len, c, err, 109 + dirent_empty_name, 110 + "empty name"); 111 111 112 - if (bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len)) { 113 - prt_printf(err, "value too big (%zu > %u)", 114 - bkey_val_u64s(k.k), dirent_val_u64s(d_name.len)); 115 - return -BCH_ERR_invalid_bkey; 116 - } 112 + bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len), c, err, 113 + dirent_val_too_big, 114 + "value too big (%zu > %u)", 115 + bkey_val_u64s(k.k), dirent_val_u64s(d_name.len)); 117 116 118 117 /* 119 118 * Check new keys don't exceed the max length 120 119 * (older keys may be larger.) 
121 120 */ 122 - if ((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX) { 123 - prt_printf(err, "dirent name too big (%u > %u)", 124 - d_name.len, BCH_NAME_MAX); 125 - return -BCH_ERR_invalid_bkey; 126 - } 121 + bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX, c, err, 122 + dirent_name_too_long, 123 + "dirent name too big (%u > %u)", 124 + d_name.len, BCH_NAME_MAX); 127 125 128 - if (d_name.len != strnlen(d_name.name, d_name.len)) { 129 - prt_printf(err, "dirent has stray data after name's NUL"); 130 - return -BCH_ERR_invalid_bkey; 131 - } 126 + bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len), c, err, 127 + dirent_name_embedded_nul, 128 + "dirent has stray data after name's NUL"); 132 129 133 - if (d_name.len == 1 && !memcmp(d_name.name, ".", 1)) { 134 - prt_printf(err, "invalid name"); 135 - return -BCH_ERR_invalid_bkey; 136 - } 130 + bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) || 131 + (d_name.len == 2 && !memcmp(d_name.name, "..", 2)), c, err, 132 + dirent_name_dot_or_dotdot, 133 + "invalid name"); 137 134 138 - if (d_name.len == 2 && !memcmp(d_name.name, "..", 2)) { 139 - prt_printf(err, "invalid name"); 140 - return -BCH_ERR_invalid_bkey; 141 - } 135 + bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len), c, err, 136 + dirent_name_has_slash, 137 + "name with /"); 142 138 143 - if (memchr(d_name.name, '/', d_name.len)) { 144 - prt_printf(err, "invalid name"); 145 - return -BCH_ERR_invalid_bkey; 146 - } 147 - 148 - if (d.v->d_type != DT_SUBVOL && 149 - le64_to_cpu(d.v->d_inum) == d.k->p.inode) { 150 - prt_printf(err, "dirent points to own directory"); 151 - return -BCH_ERR_invalid_bkey; 152 - } 153 - 154 - return 0; 139 + bkey_fsck_err_on(d.v->d_type != DT_SUBVOL && 140 + le64_to_cpu(d.v->d_inum) == d.k->p.inode, c, err, 141 + dirent_to_itself, 142 + "dirent points to own directory"); 143 + fsck_err: 144 + return ret; 155 145 } 156 146 157 147 void bch2_dirent_to_text(struct printbuf 
*out, struct bch_fs *c,
+1 -1
fs/bcachefs/dirent.h
··· 7 7 enum bkey_invalid_flags; 8 8 extern const struct bch_hash_desc bch2_dirent_hash_desc; 9 9 10 - int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c, 10 + int bch2_dirent_invalid(struct bch_fs *, struct bkey_s_c, 11 11 enum bkey_invalid_flags, struct printbuf *); 12 12 void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 13 13
+13 -16
fs/bcachefs/ec.c
··· 105 105 106 106 /* Stripes btree keys: */ 107 107 108 - int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k, 108 + int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k, 109 109 enum bkey_invalid_flags flags, 110 110 struct printbuf *err) 111 111 { 112 112 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v; 113 + int ret = 0; 113 114 114 - if (bkey_eq(k.k->p, POS_MIN)) { 115 - prt_printf(err, "stripe at POS_MIN"); 116 - return -BCH_ERR_invalid_bkey; 117 - } 115 + bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) || 116 + bpos_gt(k.k->p, POS(0, U32_MAX)), c, err, 117 + stripe_pos_bad, 118 + "stripe at bad pos"); 118 119 119 - if (k.k->p.inode) { 120 - prt_printf(err, "nonzero inode field"); 121 - return -BCH_ERR_invalid_bkey; 122 - } 120 + bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err, 121 + stripe_val_size_bad, 122 + "incorrect value size (%zu < %u)", 123 + bkey_val_u64s(k.k), stripe_val_u64s(s)); 123 124 124 - if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) { 125 - prt_printf(err, "incorrect value size (%zu < %u)", 126 - bkey_val_u64s(k.k), stripe_val_u64s(s)); 127 - return -BCH_ERR_invalid_bkey; 128 - } 129 - 130 - return bch2_bkey_ptrs_invalid(c, k, flags, err); 125 + ret = bch2_bkey_ptrs_invalid(c, k, flags, err); 126 + fsck_err: 127 + return ret; 131 128 } 132 129 133 130 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
+1 -1
fs/bcachefs/ec.h
··· 8 8 9 9 enum bkey_invalid_flags; 10 10 11 - int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c, 11 + int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c, 12 12 enum bkey_invalid_flags, struct printbuf *); 13 13 void bch2_stripe_to_text(struct printbuf *, struct bch_fs *, 14 14 struct bkey_s_c);
+6 -1
fs/bcachefs/error.c
··· 141 141 return s; 142 142 } 143 143 144 - int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...) 144 + int bch2_fsck_err(struct bch_fs *c, 145 + enum bch_fsck_flags flags, 146 + enum bch_sb_error_id err, 147 + const char *fmt, ...) 145 148 { 146 149 struct fsck_err_state *s = NULL; 147 150 va_list args; 148 151 bool print = true, suppressing = false, inconsistent = false; 149 152 struct printbuf buf = PRINTBUF, *out = &buf; 150 153 int ret = -BCH_ERR_fsck_ignore; 154 + 155 + bch2_sb_error_count(c, err); 151 156 152 157 va_start(args, fmt); 153 158 prt_vprintf(out, fmt, args);
+58 -22
fs/bcachefs/error.h
··· 4 4 5 5 #include <linux/list.h> 6 6 #include <linux/printk.h> 7 + #include "sb-errors.h" 7 8 8 9 struct bch_dev; 9 10 struct bch_fs; ··· 102 101 char *last_msg; 103 102 }; 104 103 105 - #define FSCK_CAN_FIX (1 << 0) 106 - #define FSCK_CAN_IGNORE (1 << 1) 107 - #define FSCK_NEED_FSCK (1 << 2) 108 - #define FSCK_NO_RATELIMIT (1 << 3) 104 + enum bch_fsck_flags { 105 + FSCK_CAN_FIX = 1 << 0, 106 + FSCK_CAN_IGNORE = 1 << 1, 107 + FSCK_NEED_FSCK = 1 << 2, 108 + FSCK_NO_RATELIMIT = 1 << 3, 109 + }; 109 110 110 - __printf(3, 4) __cold 111 - int bch2_fsck_err(struct bch_fs *, unsigned, const char *, ...); 111 + #define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err) 112 + 113 + __printf(4, 5) __cold 114 + int bch2_fsck_err(struct bch_fs *, 115 + enum bch_fsck_flags, 116 + enum bch_sb_error_id, 117 + const char *, ...); 112 118 void bch2_flush_fsck_errs(struct bch_fs *); 113 119 114 - #define __fsck_err(c, _flags, msg, ...) \ 120 + #define __fsck_err(c, _flags, _err_type, ...) \ 115 121 ({ \ 116 - int _ret = bch2_fsck_err(c, _flags, msg, ##__VA_ARGS__); \ 122 + int _ret = bch2_fsck_err(c, _flags, BCH_FSCK_ERR_##_err_type, \ 123 + __VA_ARGS__); \ 117 124 \ 118 125 if (_ret != -BCH_ERR_fsck_fix && \ 119 126 _ret != -BCH_ERR_fsck_ignore) { \ ··· 136 127 137 128 /* XXX: mark in superblock that filesystem contains errors, if we ignore: */ 138 129 139 - #define __fsck_err_on(cond, c, _flags, ...) \ 140 - (unlikely(cond) ? __fsck_err(c, _flags, ##__VA_ARGS__) : false) 130 + #define __fsck_err_on(cond, c, _flags, _err_type, ...) \ 131 + (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false) 141 132 142 - #define need_fsck_err_on(cond, c, ...) \ 143 - __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__) 133 + #define need_fsck_err_on(cond, c, _err_type, ...) \ 134 + __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__) 144 135 145 - #define need_fsck_err(c, ...) 
\ 146 - __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__) 136 + #define need_fsck_err(c, _err_type, ...) \ 137 + __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__) 147 138 148 - #define mustfix_fsck_err(c, ...) \ 149 - __fsck_err(c, FSCK_CAN_FIX, ##__VA_ARGS__) 139 + #define mustfix_fsck_err(c, _err_type, ...) \ 140 + __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__) 150 141 151 - #define mustfix_fsck_err_on(cond, c, ...) \ 152 - __fsck_err_on(cond, c, FSCK_CAN_FIX, ##__VA_ARGS__) 142 + #define mustfix_fsck_err_on(cond, c, _err_type, ...) \ 143 + __fsck_err_on(cond, c, FSCK_CAN_FIX, _err_type, __VA_ARGS__) 153 144 154 - #define fsck_err(c, ...) \ 155 - __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__) 145 + #define fsck_err(c, _err_type, ...) \ 146 + __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) 156 147 157 - #define fsck_err_on(cond, c, ...) \ 158 - __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__) 148 + #define fsck_err_on(cond, c, _err_type, ...) \ 149 + __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__) 150 + 151 + static inline void bch2_bkey_fsck_err(struct bch_fs *c, 152 + struct printbuf *err_msg, 153 + enum bch_sb_error_id err_type, 154 + const char *fmt, ...) 155 + { 156 + va_list args; 157 + 158 + va_start(args, fmt); 159 + prt_vprintf(err_msg, fmt, args); 160 + va_end(args); 161 + 162 + } 163 + 164 + #define bkey_fsck_err(c, _err_msg, _err_type, ...) \ 165 + do { \ 166 + prt_printf(_err_msg, __VA_ARGS__); \ 167 + bch2_sb_error_count(c, BCH_FSCK_ERR_##_err_type); \ 168 + ret = -BCH_ERR_invalid_bkey; \ 169 + goto fsck_err; \ 170 + } while (0) 171 + 172 + #define bkey_fsck_err_on(cond, ...) \ 173 + do { \ 174 + if (unlikely(cond)) \ 175 + bkey_fsck_err(__VA_ARGS__); \ 176 + } while (0) 159 177 160 178 /* 161 179 * Fatal errors: these don't indicate a bug, but we can't continue running in RW
+106 -130
fs/bcachefs/extents.c
··· 163 163 164 164 /* KEY_TYPE_btree_ptr: */ 165 165 166 - int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k, 166 + int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k, 167 167 enum bkey_invalid_flags flags, 168 168 struct printbuf *err) 169 169 { 170 - if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) { 171 - prt_printf(err, "value too big (%zu > %u)", 172 - bkey_val_u64s(k.k), BCH_REPLICAS_MAX); 173 - return -BCH_ERR_invalid_bkey; 174 - } 170 + int ret = 0; 175 171 176 - return bch2_bkey_ptrs_invalid(c, k, flags, err); 172 + bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err, 173 + btree_ptr_val_too_big, 174 + "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX); 175 + 176 + ret = bch2_bkey_ptrs_invalid(c, k, flags, err); 177 + fsck_err: 178 + return ret; 177 179 } 178 180 179 181 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c, ··· 184 182 bch2_bkey_ptrs_to_text(out, c, k); 185 183 } 186 184 187 - int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k, 185 + int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k, 188 186 enum bkey_invalid_flags flags, 189 187 struct printbuf *err) 190 188 { 191 - if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) { 192 - prt_printf(err, "value too big (%zu > %zu)", 193 - bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX); 194 - return -BCH_ERR_invalid_bkey; 195 - } 189 + int ret = 0; 196 190 197 - return bch2_bkey_ptrs_invalid(c, k, flags, err); 191 + bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err, 192 + btree_ptr_v2_val_too_big, 193 + "value too big (%zu > %zu)", 194 + bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX); 195 + 196 + ret = bch2_bkey_ptrs_invalid(c, k, flags, err); 197 + fsck_err: 198 + return ret; 198 199 } 199 200 200 201 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c, ··· 378 373 379 374 /* KEY_TYPE_reservation: */ 380 375 381 - int bch2_reservation_invalid(const 
struct bch_fs *c, struct bkey_s_c k, 376 + int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k, 382 377 enum bkey_invalid_flags flags, 383 378 struct printbuf *err) 384 379 { 385 380 struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k); 381 + int ret = 0; 386 382 387 - if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) { 388 - prt_printf(err, "invalid nr_replicas (%u)", 389 - r.v->nr_replicas); 390 - return -BCH_ERR_invalid_bkey; 391 - } 392 - 393 - return 0; 383 + bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err, 384 + reservation_key_nr_replicas_invalid, 385 + "invalid nr_replicas (%u)", r.v->nr_replicas); 386 + fsck_err: 387 + return ret; 394 388 } 395 389 396 390 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c, ··· 1062 1058 } 1063 1059 } 1064 1060 1065 - static int extent_ptr_invalid(const struct bch_fs *c, 1061 + static int extent_ptr_invalid(struct bch_fs *c, 1066 1062 struct bkey_s_c k, 1067 1063 enum bkey_invalid_flags flags, 1068 1064 const struct bch_extent_ptr *ptr, ··· 1075 1071 u64 bucket; 1076 1072 u32 bucket_offset; 1077 1073 struct bch_dev *ca; 1074 + int ret = 0; 1078 1075 1079 1076 if (!bch2_dev_exists2(c, ptr->dev)) { 1080 1077 /* ··· 1086 1081 if (flags & BKEY_INVALID_WRITE) 1087 1082 return 0; 1088 1083 1089 - prt_printf(err, "pointer to invalid device (%u)", ptr->dev); 1090 - return -BCH_ERR_invalid_bkey; 1084 + bkey_fsck_err(c, err, ptr_to_invalid_device, 1085 + "pointer to invalid device (%u)", ptr->dev); 1091 1086 } 1092 1087 1093 1088 ca = bch_dev_bkey_exists(c, ptr->dev); 1094 1089 bkey_for_each_ptr(ptrs, ptr2) 1095 - if (ptr != ptr2 && ptr->dev == ptr2->dev) { 1096 - prt_printf(err, "multiple pointers to same device (%u)", ptr->dev); 1097 - return -BCH_ERR_invalid_bkey; 1098 - } 1090 + bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err, 1091 + ptr_to_duplicate_device, 1092 + "multiple pointers to same device (%u)", ptr->dev); 1099 1093 1100 
1094 bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset); 1101 1095 1102 - if (bucket >= ca->mi.nbuckets) { 1103 - prt_printf(err, "pointer past last bucket (%llu > %llu)", 1104 - bucket, ca->mi.nbuckets); 1105 - return -BCH_ERR_invalid_bkey; 1106 - } 1107 - 1108 - if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) { 1109 - prt_printf(err, "pointer before first bucket (%llu < %u)", 1110 - bucket, ca->mi.first_bucket); 1111 - return -BCH_ERR_invalid_bkey; 1112 - } 1113 - 1114 - if (bucket_offset + size_ondisk > ca->mi.bucket_size) { 1115 - prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)", 1096 + bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err, 1097 + ptr_after_last_bucket, 1098 + "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets); 1099 + bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err, 1100 + ptr_before_first_bucket, 1101 + "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket); 1102 + bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err, 1103 + ptr_spans_multiple_buckets, 1104 + "pointer spans multiple buckets (%u + %u > %u)", 1116 1105 bucket_offset, size_ondisk, ca->mi.bucket_size); 1117 - return -BCH_ERR_invalid_bkey; 1118 - } 1119 - 1120 - return 0; 1106 + fsck_err: 1107 + return ret; 1121 1108 } 1122 1109 1123 - int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k, 1110 + int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k, 1124 1111 enum bkey_invalid_flags flags, 1125 1112 struct printbuf *err) 1126 1113 { ··· 1122 1125 unsigned size_ondisk = k.k->size; 1123 1126 unsigned nonce = UINT_MAX; 1124 1127 unsigned nr_ptrs = 0; 1125 - bool unwritten = false, have_ec = false, crc_since_last_ptr = false; 1126 - int ret; 1128 + bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false; 1129 + int ret = 0; 1127 1130 1128 1131 if (bkey_is_btree_ptr(k.k)) 1129 1132 size_ondisk = 
btree_sectors(c); 1130 1133 1131 1134 bkey_extent_entry_for_each(ptrs, entry) { 1132 - if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) { 1133 - prt_printf(err, "invalid extent entry type (got %u, max %u)", 1134 - __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX); 1135 - return -BCH_ERR_invalid_bkey; 1136 - } 1135 + bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err, 1136 + extent_ptrs_invalid_entry, 1137 + "invalid extent entry type (got %u, max %u)", 1138 + __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX); 1137 1139 1138 - if (bkey_is_btree_ptr(k.k) && 1139 - !extent_entry_is_ptr(entry)) { 1140 - prt_printf(err, "has non ptr field"); 1141 - return -BCH_ERR_invalid_bkey; 1142 - } 1140 + bkey_fsck_err_on(bkey_is_btree_ptr(k.k) && 1141 + !extent_entry_is_ptr(entry), c, err, 1142 + btree_ptr_has_non_ptr, 1143 + "has non ptr field"); 1143 1144 1144 1145 switch (extent_entry_type(entry)) { 1145 1146 case BCH_EXTENT_ENTRY_ptr: ··· 1146 1151 if (ret) 1147 1152 return ret; 1148 1153 1149 - if (nr_ptrs && unwritten != entry->ptr.unwritten) { 1150 - prt_printf(err, "extent with unwritten and written ptrs"); 1151 - return -BCH_ERR_invalid_bkey; 1152 - } 1154 + bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err, 1155 + ptr_cached_and_erasure_coded, 1156 + "cached, erasure coded ptr"); 1153 1157 1154 - if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) { 1155 - prt_printf(err, "has unwritten ptrs"); 1156 - return -BCH_ERR_invalid_bkey; 1157 - } 1158 + if (!entry->ptr.unwritten) 1159 + have_written = true; 1160 + else 1161 + have_unwritten = true; 1158 1162 1159 - if (entry->ptr.cached && have_ec) { 1160 - prt_printf(err, "cached, erasure coded ptr"); 1161 - return -BCH_ERR_invalid_bkey; 1162 - } 1163 - 1164 - unwritten = entry->ptr.unwritten; 1165 1163 have_ec = false; 1166 1164 crc_since_last_ptr = false; 1167 1165 nr_ptrs++; ··· 1164 1176 case BCH_EXTENT_ENTRY_crc128: 1165 1177 crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry)); 
1166 1178 1167 - if (crc.offset + crc.live_size > 1168 - crc.uncompressed_size) { 1169 - prt_printf(err, "checksum offset + key size > uncompressed size"); 1170 - return -BCH_ERR_invalid_bkey; 1171 - } 1172 - 1173 - size_ondisk = crc.compressed_size; 1174 - 1175 - if (!bch2_checksum_type_valid(c, crc.csum_type)) { 1176 - prt_printf(err, "invalid checksum type"); 1177 - return -BCH_ERR_invalid_bkey; 1178 - } 1179 - 1180 - if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) { 1181 - prt_printf(err, "invalid compression type"); 1182 - return -BCH_ERR_invalid_bkey; 1183 - } 1179 + bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err, 1180 + ptr_crc_uncompressed_size_too_small, 1181 + "checksum offset + key size > uncompressed size"); 1182 + bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err, 1183 + ptr_crc_csum_type_unknown, 1184 + "invalid checksum type"); 1185 + bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err, 1186 + ptr_crc_compression_type_unknown, 1187 + "invalid compression type"); 1184 1188 1185 1189 if (bch2_csum_type_is_encryption(crc.csum_type)) { 1186 1190 if (nonce == UINT_MAX) 1187 1191 nonce = crc.offset + crc.nonce; 1188 - else if (nonce != crc.offset + crc.nonce) { 1189 - prt_printf(err, "incorrect nonce"); 1190 - return -BCH_ERR_invalid_bkey; 1191 - } 1192 + else if (nonce != crc.offset + crc.nonce) 1193 + bkey_fsck_err(c, err, ptr_crc_nonce_mismatch, 1194 + "incorrect nonce"); 1192 1195 } 1193 1196 1194 - if (crc_since_last_ptr) { 1195 - prt_printf(err, "redundant crc entry"); 1196 - return -BCH_ERR_invalid_bkey; 1197 - } 1197 + bkey_fsck_err_on(crc_since_last_ptr, c, err, 1198 + ptr_crc_redundant, 1199 + "redundant crc entry"); 1198 1200 crc_since_last_ptr = true; 1199 1201 1200 - if (crc_is_encoded(crc) && 1201 - (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && 1202 - (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT))) { 1203 - prt_printf(err, "too large encoded extent"); 
1204 - return -BCH_ERR_invalid_bkey; 1205 - } 1202 + bkey_fsck_err_on(crc_is_encoded(crc) && 1203 + (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) && 1204 + (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err, 1205 + ptr_crc_uncompressed_size_too_big, 1206 + "too large encoded extent"); 1206 1207 1208 + size_ondisk = crc.compressed_size; 1207 1209 break; 1208 1210 case BCH_EXTENT_ENTRY_stripe_ptr: 1209 - if (have_ec) { 1210 - prt_printf(err, "redundant stripe entry"); 1211 - return -BCH_ERR_invalid_bkey; 1212 - } 1211 + bkey_fsck_err_on(have_ec, c, err, 1212 + ptr_stripe_redundant, 1213 + "redundant stripe entry"); 1213 1214 have_ec = true; 1214 1215 break; 1215 1216 case BCH_EXTENT_ENTRY_rebalance: { ··· 1215 1238 } 1216 1239 } 1217 1240 1218 - if (!nr_ptrs) { 1219 - prt_str(err, "no ptrs"); 1220 - return -BCH_ERR_invalid_bkey; 1221 - } 1222 - 1223 - if (nr_ptrs >= BCH_BKEY_PTRS_MAX) { 1224 - prt_str(err, "too many ptrs"); 1225 - return -BCH_ERR_invalid_bkey; 1226 - } 1227 - 1228 - if (crc_since_last_ptr) { 1229 - prt_printf(err, "redundant crc entry"); 1230 - return -BCH_ERR_invalid_bkey; 1231 - } 1232 - 1233 - if (have_ec) { 1234 - prt_printf(err, "redundant stripe entry"); 1235 - return -BCH_ERR_invalid_bkey; 1236 - } 1237 - 1238 - return 0; 1241 + bkey_fsck_err_on(!nr_ptrs, c, err, 1242 + extent_ptrs_no_ptrs, 1243 + "no ptrs"); 1244 + bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err, 1245 + extent_ptrs_too_many_ptrs, 1246 + "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX); 1247 + bkey_fsck_err_on(have_written && have_unwritten, c, err, 1248 + extent_ptrs_written_and_unwritten, 1249 + "extent with unwritten and written ptrs"); 1250 + bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err, 1251 + extent_ptrs_unwritten, 1252 + "has unwritten ptrs"); 1253 + bkey_fsck_err_on(crc_since_last_ptr, c, err, 1254 + extent_ptrs_redundant_crc, 1255 + "redundant crc entry"); 1256 + bkey_fsck_err_on(have_ec, c, err, 1257 + 
extent_ptrs_redundant_stripe, 1258 + "redundant stripe entry"); 1259 + fsck_err: 1260 + return ret; 1239 1261 } 1240 1262 1241 1263 void bch2_ptr_swab(struct bkey_s k)
+4 -4
fs/bcachefs/extents.h
··· 400 400 401 401 /* KEY_TYPE_btree_ptr: */ 402 402 403 - int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c, 403 + int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c, 404 404 enum bkey_invalid_flags, struct printbuf *); 405 405 void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *, 406 406 struct bkey_s_c); 407 407 408 - int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c, 408 + int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c, 409 409 enum bkey_invalid_flags, struct printbuf *); 410 410 void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 411 411 void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned, ··· 445 445 446 446 /* KEY_TYPE_reservation: */ 447 447 448 - int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c, 448 + int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c, 449 449 enum bkey_invalid_flags, struct printbuf *); 450 450 void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 451 451 bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c); ··· 705 705 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s); 706 706 void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *, 707 707 struct bkey_s_c); 708 - int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c, 708 + int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c, 709 709 enum bkey_invalid_flags, struct printbuf *); 710 710 711 711 void bch2_ptr_swab(struct bkey_s);
+60 -36
fs/bcachefs/fsck.c
··· 721 721 int ret = 0; 722 722 723 723 if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c, 724 - "key in missing snapshot: %s", 725 - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 724 + bkey_in_missing_snapshot, 725 + "key in missing snapshot: %s", 726 + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 726 727 ret = bch2_btree_delete_at(trans, iter, 727 728 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1; 728 729 fsck_err: ··· 792 791 793 792 if (fsck_err_on(k.k->type == desc.key_type && 794 793 !desc.cmp_bkey(k, hash_k), c, 794 + hash_table_key_duplicate, 795 795 "duplicate hash table keys:\n%s", 796 796 (printbuf_reset(&buf), 797 797 bch2_bkey_val_to_text(&buf, c, hash_k), ··· 811 809 printbuf_exit(&buf); 812 810 return ret; 813 811 bad_hash: 814 - if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s", 812 + if (fsck_err(c, hash_table_key_wrong_offset, 813 + "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s", 815 814 bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash, 816 815 (printbuf_reset(&buf), 817 816 bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) { ··· 863 860 *prev = u; 864 861 865 862 if (fsck_err_on(prev->bi_hash_seed != u.bi_hash_seed || 866 - inode_d_type(prev) != inode_d_type(&u), c, 863 + inode_d_type(prev) != inode_d_type(&u), 864 + c, inode_snapshot_mismatch, 867 865 "inodes in different snapshots don't match")) { 868 866 bch_err(c, "repair not implemented yet"); 869 867 return -EINVAL; ··· 892 888 893 889 if (u.bi_flags & BCH_INODE_UNLINKED && 894 890 (!c->sb.clean || 895 - fsck_err(c, "filesystem marked clean, but inode %llu unlinked", 891 + fsck_err(c, inode_unlinked_but_clean, 892 + "filesystem marked clean, but inode %llu unlinked", 896 893 u.bi_inum))) { 897 894 bch2_trans_unlock(trans); 898 895 bch2_fs_lazy_rw(c); ··· 905 900 906 901 if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY && 907 902 (!c->sb.clean || 908 - fsck_err(c, 
"filesystem marked clean, but inode %llu has i_size dirty", 903 + fsck_err(c, inode_i_size_dirty_but_clean, 904 + "filesystem marked clean, but inode %llu has i_size dirty", 909 905 u.bi_inum))) { 910 906 bch_verbose(c, "truncating inode %llu", u.bi_inum); 911 907 ··· 938 932 939 933 if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY && 940 934 (!c->sb.clean || 941 - fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty", 935 + fsck_err(c, inode_i_sectors_dirty_but_clean, 936 + "filesystem marked clean, but inode %llu has i_sectors dirty", 942 937 u.bi_inum))) { 943 938 s64 sectors; 944 939 ··· 1065 1058 return -BCH_ERR_internal_fsck_err; 1066 1059 } 1067 1060 1068 - if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c, 1069 - "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu", 1070 - w->last_pos.inode, i->snapshot, 1071 - i->inode.bi_sectors, i->count)) { 1061 + if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), 1062 + c, inode_i_sectors_wrong, 1063 + "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu", 1064 + w->last_pos.inode, i->snapshot, 1065 + i->inode.bi_sectors, i->count)) { 1072 1066 i->inode.bi_sectors = i->count; 1073 1067 ret = fsck_write_inode(trans, &i->inode, i->snapshot); 1074 1068 if (ret) ··· 1210 1202 prt_printf(&buf, "\n overwriting %s extent", 1211 1203 pos1.snapshot >= pos2.p.snapshot ? 
"first" : "second"); 1212 1204 1213 - if (fsck_err(c, "overlapping extents%s", buf.buf)) { 1205 + if (fsck_err(c, extent_overlapping, 1206 + "overlapping extents%s", buf.buf)) { 1214 1207 struct btree_iter *old_iter = &iter1; 1215 1208 struct disk_reservation res = { 0 }; 1216 1209 ··· 1366 1357 goto err; 1367 1358 1368 1359 if (k.k->type != KEY_TYPE_whiteout) { 1369 - if (fsck_err_on(!i, c, 1360 + if (fsck_err_on(!i, c, extent_in_missing_inode, 1370 1361 "extent in missing inode:\n %s", 1371 1362 (printbuf_reset(&buf), 1372 1363 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) ··· 1374 1365 1375 1366 if (fsck_err_on(i && 1376 1367 !S_ISREG(i->inode.bi_mode) && 1377 - !S_ISLNK(i->inode.bi_mode), c, 1368 + !S_ISLNK(i->inode.bi_mode), 1369 + c, extent_in_non_reg_inode, 1378 1370 "extent in non regular inode mode %o:\n %s", 1379 1371 i->inode.bi_mode, 1380 1372 (printbuf_reset(&buf), ··· 1407 1397 if (k.k->type != KEY_TYPE_whiteout) { 1408 1398 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) && 1409 1399 k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 && 1410 - !bkey_extent_is_reservation(k), c, 1400 + !bkey_extent_is_reservation(k), 1401 + c, extent_past_end_of_inode, 1411 1402 "extent type past end of inode %llu:%u, i_size %llu\n %s", 1412 1403 i->inode.bi_inum, i->snapshot, i->inode.bi_size, 1413 1404 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { ··· 1530 1519 continue; 1531 1520 } 1532 1521 1533 - if (fsck_err_on(i->inode.bi_nlink != i->count, c, 1522 + if (fsck_err_on(i->inode.bi_nlink != i->count, 1523 + c, inode_dir_wrong_nlink, 1534 1524 "directory %llu:%u with wrong i_nlink: got %u, should be %llu", 1535 1525 w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) { 1536 1526 i->inode.bi_nlink = i->count; ··· 1575 1563 backpointer_exists = ret; 1576 1564 ret = 0; 1577 1565 1578 - if (fsck_err_on(S_ISDIR(target->bi_mode) && 1579 - backpointer_exists, c, 1566 + if (fsck_err_on(S_ISDIR(target->bi_mode) && backpointer_exists, 
1567 + c, inode_dir_multiple_links, 1580 1568 "directory %llu with multiple links", 1581 1569 target->bi_inum)) { 1582 1570 ret = __remove_dirent(trans, d.k->p); 1583 1571 goto out; 1584 1572 } 1585 1573 1586 - if (fsck_err_on(backpointer_exists && 1587 - !target->bi_nlink, c, 1574 + if (fsck_err_on(backpointer_exists && !target->bi_nlink, 1575 + c, inode_multiple_links_but_nlink_0, 1588 1576 "inode %llu type %s has multiple links but i_nlink 0", 1589 1577 target->bi_inum, bch2_d_types[d.v->d_type])) { 1590 1578 target->bi_nlink++; ··· 1595 1583 goto err; 1596 1584 } 1597 1585 1598 - if (fsck_err_on(!backpointer_exists, c, 1586 + if (fsck_err_on(!backpointer_exists, 1587 + c, inode_wrong_backpointer, 1599 1588 "inode %llu:%u has wrong backpointer:\n" 1600 1589 "got %llu:%llu\n" 1601 1590 "should be %llu:%llu", ··· 1614 1601 } 1615 1602 } 1616 1603 1617 - if (fsck_err_on(d.v->d_type != inode_d_type(target), c, 1604 + if (fsck_err_on(d.v->d_type != inode_d_type(target), 1605 + c, dirent_d_type_wrong, 1618 1606 "incorrect d_type: got %s, should be %s:\n%s", 1619 1607 bch2_d_type_str(d.v->d_type), 1620 1608 bch2_d_type_str(inode_d_type(target)), ··· 1639 1625 if (d.v->d_type == DT_SUBVOL && 1640 1626 target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) && 1641 1627 (c->sb.version < bcachefs_metadata_version_subvol_dirent || 1642 - fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u", 1628 + fsck_err(c, dirent_d_parent_subvol_wrong, 1629 + "dirent has wrong d_parent_subvol field: got %u, should be %u", 1643 1630 le32_to_cpu(d.v->d_parent_subvol), 1644 1631 target->bi_parent_subvol))) { 1645 1632 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k)); ··· 1712 1697 *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode); 1713 1698 dir->first_this_inode = false; 1714 1699 1715 - if (fsck_err_on(!i, c, 1700 + if (fsck_err_on(!i, c, dirent_in_missing_dir_inode, 1716 1701 "dirent in nonexisting directory:\n%s", 1717 1702 (printbuf_reset(&buf), 
1718 1703 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { ··· 1724 1709 if (!i) 1725 1710 goto out; 1726 1711 1727 - if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c, 1712 + if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), 1713 + c, dirent_in_non_dir_inode, 1728 1714 "dirent in non directory inode type %s:\n%s", 1729 1715 bch2_d_type_str(inode_d_type(&i->inode)), 1730 1716 (printbuf_reset(&buf), ··· 1759 1743 if (ret && !bch2_err_matches(ret, ENOENT)) 1760 1744 goto err; 1761 1745 1762 - if (fsck_err_on(ret, c, 1746 + if (fsck_err_on(ret, c, dirent_to_missing_subvol, 1763 1747 "dirent points to missing subvolume %u", 1764 1748 le32_to_cpu(d.v->d_child_subvol))) { 1765 1749 ret = __remove_dirent(trans, d.k->p); ··· 1771 1755 if (ret && !bch2_err_matches(ret, ENOENT)) 1772 1756 goto err; 1773 1757 1774 - if (fsck_err_on(ret, c, 1758 + if (fsck_err_on(ret, c, subvol_to_missing_root, 1775 1759 "subvolume %u points to missing subvolume root %llu", 1776 1760 target_subvol, 1777 1761 target_inum)) { ··· 1780 1764 goto err; 1781 1765 } 1782 1766 1783 - if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c, 1767 + if (fsck_err_on(subvol_root.bi_subvol != target_subvol, 1768 + c, subvol_root_wrong_bi_subvol, 1784 1769 "subvol root %llu has wrong bi_subvol field: got %u, should be %u", 1785 1770 target_inum, 1786 1771 subvol_root.bi_subvol, target_subvol)) { ··· 1800 1783 if (ret) 1801 1784 goto err; 1802 1785 1803 - if (fsck_err_on(!target->inodes.nr, c, 1786 + if (fsck_err_on(!target->inodes.nr, 1787 + c, dirent_to_missing_inode, 1804 1788 "dirent points to missing inode: (equiv %u)\n%s", 1805 1789 equiv.snapshot, 1806 1790 (printbuf_reset(&buf), ··· 1887 1869 *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode); 1888 1870 inode->first_this_inode = false; 1889 1871 1890 - if (fsck_err_on(!i, c, 1872 + if (fsck_err_on(!i, c, xattr_in_missing_inode, 1891 1873 "xattr for missing inode %llu", 1892 1874 k.k->p.inode)) 1893 1875 return bch2_btree_delete_at(trans, iter, 
0); ··· 1936 1918 if (ret && !bch2_err_matches(ret, ENOENT)) 1937 1919 return ret; 1938 1920 1939 - if (mustfix_fsck_err_on(ret, c, "root subvol missing")) { 1921 + if (mustfix_fsck_err_on(ret, c, root_subvol_missing, 1922 + "root subvol missing")) { 1940 1923 struct bkey_i_subvolume root_subvol; 1941 1924 1942 1925 snapshot = U32_MAX; ··· 1963 1944 if (ret && !bch2_err_matches(ret, ENOENT)) 1964 1945 return ret; 1965 1946 1966 - if (mustfix_fsck_err_on(ret, c, "root directory missing") || 1967 - mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c, 1947 + if (mustfix_fsck_err_on(ret, c, root_dir_missing, 1948 + "root directory missing") || 1949 + mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), 1950 + c, root_inode_not_dir, 1968 1951 "root inode not a directory")) { 1969 1952 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 1970 1953 0, NULL); ··· 2070 2049 } 2071 2050 2072 2051 if (bch2_err_matches(ret, ENOENT)) { 2073 - if (fsck_err(c, "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu", 2052 + if (fsck_err(c, inode_unreachable, 2053 + "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu", 2074 2054 inode->bi_inum, snapshot, 2075 2055 bch2_d_type_str(inode_d_type(inode)), 2076 2056 inode->bi_nlink, ··· 2111 2089 pr_err("%llu:%u", i->inum, i->snapshot); 2112 2090 pr_err("%llu:%u", inode->bi_inum, snapshot); 2113 2091 2114 - if (!fsck_err(c, "directory structure loop")) 2092 + if (!fsck_err(c, dir_loop, 2093 + "directory structure loop")) 2115 2094 return 0; 2116 2095 2117 2096 ret = commit_do(trans, NULL, NULL, ··· 2372 2349 link = &links->d[++*idx]; 2373 2350 } 2374 2351 2375 - if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c, 2352 + if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, 2353 + c, inode_wrong_nlink, 2376 2354 "inode %llu type %s has wrong i_nlink (%u, should be %u)", 2377 2355 u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)], 2378 2356 bch2_inode_nlink_get(&u), link->count)) {
+73 -70
fs/bcachefs/inode.c
··· 398 398 return &inode_p->inode.k_i; 399 399 } 400 400 401 - static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err) 401 + static int __bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k, struct printbuf *err) 402 402 { 403 403 struct bch_inode_unpacked unpacked; 404 + int ret = 0; 404 405 405 - if (k.k->p.inode) { 406 - prt_printf(err, "nonzero k.p.inode"); 407 - return -BCH_ERR_invalid_bkey; 408 - } 406 + bkey_fsck_err_on(k.k->p.inode, c, err, 407 + inode_pos_inode_nonzero, 408 + "nonzero k.p.inode"); 409 409 410 - if (k.k->p.offset < BLOCKDEV_INODE_MAX) { 411 - prt_printf(err, "fs inode in blockdev range"); 412 - return -BCH_ERR_invalid_bkey; 413 - } 410 + bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX, c, err, 411 + inode_pos_blockdev_range, 412 + "fs inode in blockdev range"); 414 413 415 - if (bch2_inode_unpack(k, &unpacked)) { 416 - prt_printf(err, "invalid variable length fields"); 417 - return -BCH_ERR_invalid_bkey; 418 - } 414 + bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked), c, err, 415 + inode_unpack_error, 416 + "invalid variable length fields"); 419 417 420 - if (unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1) { 421 - prt_printf(err, "invalid data checksum type (%u >= %u", 422 - unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1); 423 - return -BCH_ERR_invalid_bkey; 424 - } 418 + bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1, c, err, 419 + inode_checksum_type_invalid, 420 + "invalid data checksum type (%u >= %u", 421 + unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1); 425 422 426 - if (unpacked.bi_compression && 427 - !bch2_compression_opt_valid(unpacked.bi_compression - 1)) { 428 - prt_printf(err, "invalid compression opt %u", 429 - unpacked.bi_compression - 1); 430 - return -BCH_ERR_invalid_bkey; 431 - } 423 + bkey_fsck_err_on(unpacked.bi_compression && 424 + !bch2_compression_opt_valid(unpacked.bi_compression - 1), c, err, 425 + inode_compression_type_invalid, 426 + "invalid compression opt %u", 
unpacked.bi_compression - 1); 432 427 433 - if ((unpacked.bi_flags & BCH_INODE_UNLINKED) && 434 - unpacked.bi_nlink != 0) { 435 - prt_printf(err, "flagged as unlinked but bi_nlink != 0"); 436 - return -BCH_ERR_invalid_bkey; 437 - } 428 + bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_UNLINKED) && 429 + unpacked.bi_nlink != 0, c, err, 430 + inode_unlinked_but_nlink_nonzero, 431 + "flagged as unlinked but bi_nlink != 0"); 438 432 439 - if (unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode)) { 440 - prt_printf(err, "subvolume root but not a directory"); 441 - return -BCH_ERR_invalid_bkey; 442 - } 443 - 444 - return 0; 433 + bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode), c, err, 434 + inode_subvol_root_but_not_dir, 435 + "subvolume root but not a directory"); 436 + fsck_err: 437 + return ret; 445 438 } 446 439 447 - int bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k, 440 + int bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k, 448 441 enum bkey_invalid_flags flags, 449 442 struct printbuf *err) 450 443 { 451 444 struct bkey_s_c_inode inode = bkey_s_c_to_inode(k); 445 + int ret = 0; 452 446 453 - if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR) { 454 - prt_printf(err, "invalid str hash type (%llu >= %u)", 455 - INODE_STR_HASH(inode.v), BCH_STR_HASH_NR); 456 - return -BCH_ERR_invalid_bkey; 457 - } 447 + bkey_fsck_err_on(INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err, 448 + inode_str_hash_invalid, 449 + "invalid str hash type (%llu >= %u)", 450 + INODE_STR_HASH(inode.v), BCH_STR_HASH_NR); 458 451 459 - return __bch2_inode_invalid(k, err); 452 + ret = __bch2_inode_invalid(c, k, err); 453 + fsck_err: 454 + return ret; 460 455 } 461 456 462 - int bch2_inode_v2_invalid(const struct bch_fs *c, struct bkey_s_c k, 457 + int bch2_inode_v2_invalid(struct bch_fs *c, struct bkey_s_c k, 463 458 enum bkey_invalid_flags flags, 464 459 struct printbuf *err) 465 460 { 466 461 struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k); 462 + int ret 
= 0; 467 463 468 - if (INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR) { 469 - prt_printf(err, "invalid str hash type (%llu >= %u)", 470 - INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR); 471 - return -BCH_ERR_invalid_bkey; 472 - } 464 + bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err, 465 + inode_str_hash_invalid, 466 + "invalid str hash type (%llu >= %u)", 467 + INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR); 473 468 474 - return __bch2_inode_invalid(k, err); 469 + ret = __bch2_inode_invalid(c, k, err); 470 + fsck_err: 471 + return ret; 475 472 } 476 473 477 - int bch2_inode_v3_invalid(const struct bch_fs *c, struct bkey_s_c k, 474 + int bch2_inode_v3_invalid(struct bch_fs *c, struct bkey_s_c k, 478 475 enum bkey_invalid_flags flags, 479 476 struct printbuf *err) 480 477 { 481 478 struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k); 479 + int ret = 0; 482 480 483 - if (INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL || 484 - INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k)) { 485 - prt_printf(err, "invalid fields_start (got %llu, min %u max %zu)", 486 - INODEv3_FIELDS_START(inode.v), 487 - INODEv3_FIELDS_START_INITIAL, 488 - bkey_val_u64s(inode.k)); 489 - return -BCH_ERR_invalid_bkey; 490 - } 481 + bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL || 482 + INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k), c, err, 483 + inode_v3_fields_start_bad, 484 + "invalid fields_start (got %llu, min %u max %zu)", 485 + INODEv3_FIELDS_START(inode.v), 486 + INODEv3_FIELDS_START_INITIAL, 487 + bkey_val_u64s(inode.k)); 491 488 492 - if (INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR) { 493 - prt_printf(err, "invalid str hash type (%llu >= %u)", 494 - INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR); 495 - return -BCH_ERR_invalid_bkey; 496 - } 489 + bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err, 490 + inode_str_hash_invalid, 491 + "invalid str hash type (%llu >= %u)", 492 + 
INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR); 497 493 498 - return __bch2_inode_invalid(k, err); 494 + ret = __bch2_inode_invalid(c, k, err); 495 + fsck_err: 496 + return ret; 499 497 } 500 498 501 499 static void __bch2_inode_unpacked_to_text(struct printbuf *out, ··· 610 612 return 0; 611 613 } 612 614 613 - int bch2_inode_generation_invalid(const struct bch_fs *c, struct bkey_s_c k, 615 + int bch2_inode_generation_invalid(struct bch_fs *c, struct bkey_s_c k, 614 616 enum bkey_invalid_flags flags, 615 617 struct printbuf *err) 616 618 { 617 - if (k.k->p.inode) { 618 - prt_printf(err, "nonzero k.p.inode"); 619 - return -BCH_ERR_invalid_bkey; 620 - } 619 + int ret = 0; 621 620 622 - return 0; 621 + bkey_fsck_err_on(k.k->p.inode, c, err, 622 + inode_pos_inode_nonzero, 623 + "nonzero k.p.inode"); 624 + fsck_err: 625 + return ret; 623 626 } 624 627 625 628 void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c, ··· 1067 1068 return 0; 1068 1069 1069 1070 if (!fsck_err_on(c->sb.clean, c, 1071 + deleted_inode_but_clean, 1070 1072 "filesystem marked as clean but have deleted inode %llu:%u", 1071 1073 pos.offset, pos.snapshot)) 1072 1074 return 0; ··· 1079 1079 1080 1080 ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode; 1081 1081 if (fsck_err_on(!bkey_is_inode(k.k), c, 1082 + deleted_inode_missing, 1082 1083 "nonexistent inode %llu:%u in deleted_inodes btree", 1083 1084 pos.offset, pos.snapshot)) 1084 1085 goto delete; ··· 1089 1088 goto err; 1090 1089 1091 1090 if (fsck_err_on(S_ISDIR(inode.bi_mode), c, 1091 + deleted_inode_is_dir, 1092 1092 "directory %llu:%u in deleted_inodes btree", 1093 1093 pos.offset, pos.snapshot)) 1094 1094 goto delete; 1095 1095 1096 1096 if (fsck_err_on(!(inode.bi_flags & BCH_INODE_UNLINKED), c, 1097 + deleted_inode_not_unlinked, 1097 1098 "non-deleted inode %llu:%u in deleted_inodes btree", 1098 1099 pos.offset, pos.snapshot)) 1099 1100 goto delete;
+4 -4
fs/bcachefs/inode.h
··· 8 8 enum bkey_invalid_flags; 9 9 extern const char * const bch2_inode_opts[]; 10 10 11 - int bch2_inode_invalid(const struct bch_fs *, struct bkey_s_c, 11 + int bch2_inode_invalid(struct bch_fs *, struct bkey_s_c, 12 12 enum bkey_invalid_flags, struct printbuf *); 13 - int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c, 13 + int bch2_inode_v2_invalid(struct bch_fs *, struct bkey_s_c, 14 14 enum bkey_invalid_flags, struct printbuf *); 15 - int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c, 15 + int bch2_inode_v3_invalid(struct bch_fs *, struct bkey_s_c, 16 16 enum bkey_invalid_flags, struct printbuf *); 17 17 void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 18 18 ··· 52 52 k->type == KEY_TYPE_inode_v3; 53 53 } 54 54 55 - int bch2_inode_generation_invalid(const struct bch_fs *, struct bkey_s_c, 55 + int bch2_inode_generation_invalid(struct bch_fs *, struct bkey_s_c, 56 56 enum bkey_invalid_flags, struct printbuf *); 57 57 void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 58 58
+54 -19
fs/bcachefs/journal_io.c
··· 140 140 if (!dup->csum_good) 141 141 goto replace; 142 142 143 - fsck_err(c, "found duplicate but non identical journal entries (seq %llu)", 143 + fsck_err(c, journal_entry_replicas_data_mismatch, 144 + "found duplicate but non identical journal entries (seq %llu)", 144 145 le64_to_cpu(j->seq)); 145 146 i = dup; 146 147 goto found; ··· 236 235 prt_str(out, ": "); 237 236 } 238 237 239 - #define journal_entry_err(c, version, jset, entry, msg, ...) \ 238 + #define journal_entry_err(c, version, jset, entry, _err, msg, ...) \ 240 239 ({ \ 241 240 struct printbuf _buf = PRINTBUF; \ 242 241 \ ··· 245 244 \ 246 245 switch (flags & BKEY_INVALID_WRITE) { \ 247 246 case READ: \ 248 - mustfix_fsck_err(c, "%s", _buf.buf); \ 247 + mustfix_fsck_err(c, _err, "%s", _buf.buf); \ 249 248 break; \ 250 249 case WRITE: \ 250 + bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \ 251 251 bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\ 252 252 if (bch2_fs_inconsistent(c)) { \ 253 253 ret = -BCH_ERR_fsck_errors_not_fixed; \ ··· 261 259 true; \ 262 260 }) 263 261 264 - #define journal_entry_err_on(cond, c, version, jset, entry, msg, ...) \ 265 - ((cond) ? journal_entry_err(c, version, jset, entry, msg, ##__VA_ARGS__) : false) 262 + #define journal_entry_err_on(cond, ...) \ 263 + ((cond) ? 
journal_entry_err(__VA_ARGS__) : false) 266 264 267 265 #define FSCK_DELETED_KEY 5 268 266 ··· 279 277 struct printbuf buf = PRINTBUF; 280 278 int ret = 0; 281 279 282 - if (journal_entry_err_on(!k->k.u64s, c, version, jset, entry, "k->u64s 0")) { 280 + if (journal_entry_err_on(!k->k.u64s, 281 + c, version, jset, entry, 282 + journal_entry_bkey_u64s_0, 283 + "k->u64s 0")) { 283 284 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); 284 285 journal_entry_null_range(vstruct_next(entry), next); 285 286 return FSCK_DELETED_KEY; ··· 291 286 if (journal_entry_err_on((void *) bkey_next(k) > 292 287 (void *) vstruct_next(entry), 293 288 c, version, jset, entry, 289 + journal_entry_bkey_past_end, 294 290 "extends past end of journal entry")) { 295 291 entry->u64s = cpu_to_le16((u64 *) k - entry->_data); 296 292 journal_entry_null_range(vstruct_next(entry), next); ··· 300 294 301 295 if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, 302 296 c, version, jset, entry, 297 + journal_entry_bkey_bad_format, 303 298 "bad format %u", k->k.format)) { 304 299 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); 305 300 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); ··· 324 317 bch2_bkey_invalid(c, bkey_i_to_s_c(k), 325 318 __btree_node_type(level, btree_id), write, &buf); 326 319 327 - mustfix_fsck_err(c, "%s", buf.buf); 320 + mustfix_fsck_err(c, journal_entry_bkey_invalid, 321 + "%s", buf.buf); 328 322 329 323 le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); 330 324 memmove(k, bkey_next(k), next - (void *) bkey_next(k)); ··· 395 387 if (journal_entry_err_on(!entry->u64s || 396 388 le16_to_cpu(entry->u64s) != k->k.u64s, 397 389 c, version, jset, entry, 390 + journal_entry_btree_root_bad_size, 398 391 "invalid btree root journal entry: wrong number of keys")) { 399 392 void *next = vstruct_next(entry); 400 393 /* ··· 445 436 446 437 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, 447 438 c, version, jset, entry, 439 + journal_entry_blacklist_bad_size, 448 440 
"invalid journal seq blacklist entry: bad size")) { 449 441 journal_entry_null_range(entry, vstruct_next(entry)); 450 442 } ··· 473 463 474 464 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, 475 465 c, version, jset, entry, 466 + journal_entry_blacklist_v2_bad_size, 476 467 "invalid journal seq blacklist entry: bad size")) { 477 468 journal_entry_null_range(entry, vstruct_next(entry)); 478 469 goto out; ··· 484 473 if (journal_entry_err_on(le64_to_cpu(bl_entry->start) > 485 474 le64_to_cpu(bl_entry->end), 486 475 c, version, jset, entry, 476 + journal_entry_blacklist_v2_start_past_end, 487 477 "invalid journal seq blacklist entry: start > end")) { 488 478 journal_entry_null_range(entry, vstruct_next(entry)); 489 479 } ··· 517 505 518 506 if (journal_entry_err_on(bytes < sizeof(*u), 519 507 c, version, jset, entry, 508 + journal_entry_usage_bad_size, 520 509 "invalid journal entry usage: bad size")) { 521 510 journal_entry_null_range(entry, vstruct_next(entry)); 522 511 return ret; ··· 552 539 if (journal_entry_err_on(bytes < sizeof(*u) || 553 540 bytes < sizeof(*u) + u->r.nr_devs, 554 541 c, version, jset, entry, 542 + journal_entry_data_usage_bad_size, 555 543 "invalid journal entry usage: bad size")) { 556 544 journal_entry_null_range(entry, vstruct_next(entry)); 557 545 return ret; ··· 584 570 int ret = 0; 585 571 586 572 if (journal_entry_err_on(bytes != sizeof(*clock), 587 - c, version, jset, entry, "bad size")) { 573 + c, version, jset, entry, 574 + journal_entry_clock_bad_size, 575 + "bad size")) { 588 576 journal_entry_null_range(entry, vstruct_next(entry)); 589 577 return ret; 590 578 } 591 579 592 580 if (journal_entry_err_on(clock->rw > 1, 593 - c, version, jset, entry, "bad rw")) { 581 + c, version, jset, entry, 582 + journal_entry_clock_bad_rw, 583 + "bad rw")) { 594 584 journal_entry_null_range(entry, vstruct_next(entry)); 595 585 return ret; 596 586 } ··· 626 608 int ret = 0; 627 609 628 610 if (journal_entry_err_on(bytes < expected, 629 - 
c, version, jset, entry, "bad size (%u < %u)", 611 + c, version, jset, entry, 612 + journal_entry_dev_usage_bad_size, 613 + "bad size (%u < %u)", 630 614 bytes, expected)) { 631 615 journal_entry_null_range(entry, vstruct_next(entry)); 632 616 return ret; ··· 637 617 dev = le32_to_cpu(u->dev); 638 618 639 619 if (journal_entry_err_on(!bch2_dev_exists2(c, dev), 640 - c, version, jset, entry, "bad dev")) { 620 + c, version, jset, entry, 621 + journal_entry_dev_usage_bad_dev, 622 + "bad dev")) { 641 623 journal_entry_null_range(entry, vstruct_next(entry)); 642 624 return ret; 643 625 } 644 626 645 627 if (journal_entry_err_on(u->pad, 646 - c, version, jset, entry, "bad pad")) { 628 + c, version, jset, entry, 629 + journal_entry_dev_usage_bad_pad, 630 + "bad pad")) { 647 631 journal_entry_null_range(entry, vstruct_next(entry)); 648 632 return ret; 649 633 } ··· 762 738 763 739 vstruct_for_each(jset, entry) { 764 740 if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset), 765 - c, version, jset, entry, 741 + c, version, jset, entry, 742 + journal_entry_past_jset_end, 766 743 "journal entry extends past end of jset")) { 767 744 jset->u64s = cpu_to_le32((u64 *) entry - jset->_data); 768 745 break; ··· 792 767 version = le32_to_cpu(jset->version); 793 768 if (journal_entry_err_on(!bch2_version_compatible(version), 794 769 c, version, jset, NULL, 770 + jset_unsupported_version, 795 771 "%s sector %llu seq %llu: incompatible journal entry version %u.%u", 796 772 ca ? ca->name : c->name, 797 773 sector, le64_to_cpu(jset->seq), ··· 803 777 } 804 778 805 779 if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), 806 - c, version, jset, NULL, 780 + c, version, jset, NULL, 781 + jset_unknown_csum, 807 782 "%s sector %llu seq %llu: journal entry with unknown csum type %llu", 808 783 ca ? 
ca->name : c->name, 809 784 sector, le64_to_cpu(jset->seq), ··· 815 788 if (journal_entry_err_on(!JSET_NO_FLUSH(jset) && 816 789 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), 817 790 c, version, jset, NULL, 791 + jset_last_seq_newer_than_seq, 818 792 "invalid journal entry: last_seq > seq (%llu > %llu)", 819 793 le64_to_cpu(jset->last_seq), 820 794 le64_to_cpu(jset->seq))) { ··· 844 816 845 817 version = le32_to_cpu(jset->version); 846 818 if (journal_entry_err_on(!bch2_version_compatible(version), 847 - c, version, jset, NULL, 819 + c, version, jset, NULL, 820 + jset_unsupported_version, 848 821 "%s sector %llu seq %llu: unknown journal entry version %u.%u", 849 822 ca ? ca->name : c->name, 850 823 sector, le64_to_cpu(jset->seq), ··· 860 831 return JOURNAL_ENTRY_REREAD; 861 832 862 833 if (journal_entry_err_on(bytes > bucket_sectors_left << 9, 863 - c, version, jset, NULL, 834 + c, version, jset, NULL, 835 + jset_past_bucket_end, 864 836 "%s sector %llu seq %llu: journal entry too big (%zu bytes)", 865 837 ca ? 
ca->name : c->name, 866 838 sector, le64_to_cpu(jset->seq), bytes)) ··· 1203 1173 1204 1174 if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq), 1205 1175 c, le32_to_cpu(i->j.version), &i->j, NULL, 1176 + jset_last_seq_newer_than_seq, 1206 1177 "invalid journal entry: last_seq > seq (%llu > %llu)", 1207 1178 le64_to_cpu(i->j.last_seq), 1208 1179 le64_to_cpu(i->j.seq))) ··· 1220 1189 } 1221 1190 1222 1191 if (!*last_seq) { 1223 - fsck_err(c, "journal read done, but no entries found after dropping non-flushes"); 1192 + fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes, 1193 + "journal read done, but no entries found after dropping non-flushes"); 1224 1194 return 0; 1225 1195 } 1226 1196 ··· 1247 1215 1248 1216 if (bch2_journal_seq_is_blacklisted(c, seq, true)) { 1249 1217 fsck_err_on(!JSET_NO_FLUSH(&i->j), c, 1218 + jset_seq_blacklisted, 1250 1219 "found blacklisted journal entry %llu", seq); 1251 1220 i->ignore = true; 1252 1221 } ··· 1288 1255 bch2_journal_ptrs_to_text(&buf2, c, i); 1289 1256 1290 1257 missing_end = seq - 1; 1291 - fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n" 1258 + fsck_err(c, journal_entries_missing, 1259 + "journal entries %llu-%llu missing! (replaying %llu-%llu)\n" 1292 1260 " prev at %s\n" 1293 1261 " next at %s", 1294 1262 missing_start, missing_end, ··· 1344 1310 if (!degraded && 1345 1311 !bch2_replicas_marked(c, &replicas.e) && 1346 1312 (le64_to_cpu(i->j.seq) == *last_seq || 1347 - fsck_err(c, "superblock not marked as containing replicas for journal entry %llu\n %s", 1313 + fsck_err(c, journal_entry_replicas_not_marked, 1314 + "superblock not marked as containing replicas for journal entry %llu\n %s", 1348 1315 le64_to_cpu(i->j.seq), buf.buf))) { 1349 1316 ret = bch2_mark_replicas(c, &replicas.e); 1350 1317 if (ret)
+10 -8
fs/bcachefs/lru.c
··· 10 10 #include "recovery.h" 11 11 12 12 /* KEY_TYPE_lru is obsolete: */ 13 - int bch2_lru_invalid(const struct bch_fs *c, struct bkey_s_c k, 13 + int bch2_lru_invalid(struct bch_fs *c, struct bkey_s_c k, 14 14 enum bkey_invalid_flags flags, 15 15 struct printbuf *err) 16 16 { 17 - if (!lru_pos_time(k.k->p)) { 18 - prt_printf(err, "lru entry at time=0"); 19 - return -BCH_ERR_invalid_bkey; 17 + int ret = 0; 20 18 21 - } 22 - 23 - return 0; 19 + bkey_fsck_err_on(!lru_pos_time(k.k->p), c, err, 20 + lru_entry_at_time_0, 21 + "lru entry at time=0"); 22 + fsck_err: 23 + return ret; 24 24 } 25 25 26 26 void bch2_lru_to_text(struct printbuf *out, struct bch_fs *c, ··· 95 95 int ret; 96 96 97 97 if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_pos), c, 98 + lru_entry_to_invalid_bucket, 98 99 "lru key points to nonexistent device:bucket %llu:%llu", 99 100 alloc_pos.inode, alloc_pos.offset)) 100 101 return bch2_btree_delete_at(trans, lru_iter, 0); ··· 126 125 } 127 126 128 127 if (c->opts.reconstruct_alloc || 129 - fsck_err(c, "incorrect lru entry: lru %s time %llu\n" 128 + fsck_err(c, lru_entry_bad, 129 + "incorrect lru entry: lru %s time %llu\n" 130 130 " %s\n" 131 131 " for %s", 132 132 bch2_lru_types[type],
+1 -1
fs/bcachefs/lru.h
··· 48 48 return BCH_LRU_read; 49 49 } 50 50 51 - int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c, 51 + int bch2_lru_invalid(struct bch_fs *, struct bkey_s_c, 52 52 enum bkey_invalid_flags, struct printbuf *); 53 53 void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 54 54
+8 -7
fs/bcachefs/quota.c
··· 59 59 .to_text = bch2_sb_quota_to_text, 60 60 }; 61 61 62 - int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k, 62 + int bch2_quota_invalid(struct bch_fs *c, struct bkey_s_c k, 63 63 enum bkey_invalid_flags flags, 64 64 struct printbuf *err) 65 65 { 66 - if (k.k->p.inode >= QTYP_NR) { 67 - prt_printf(err, "invalid quota type (%llu >= %u)", 68 - k.k->p.inode, QTYP_NR); 69 - return -BCH_ERR_invalid_bkey; 70 - } 66 + int ret = 0; 71 67 72 - return 0; 68 + bkey_fsck_err_on(k.k->p.inode >= QTYP_NR, c, err, 69 + quota_type_invalid, 70 + "invalid quota type (%llu >= %u)", 71 + k.k->p.inode, QTYP_NR); 72 + fsck_err: 73 + return ret; 73 74 } 74 75 75 76 void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
+1 -1
fs/bcachefs/quota.h
··· 8 8 enum bkey_invalid_flags; 9 9 extern const struct bch_sb_field_ops bch_sb_field_ops_quota; 10 10 11 - int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c, 11 + int bch2_quota_invalid(struct bch_fs *, struct bkey_s_c, 12 12 enum bkey_invalid_flags, struct printbuf *); 13 13 void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 14 14
+8 -2
fs/bcachefs/recovery.c
··· 365 365 } 366 366 367 367 if (r->error) { 368 - __fsck_err(c, btree_id_is_alloc(i) 368 + __fsck_err(c, 369 + btree_id_is_alloc(i) 369 370 ? FSCK_CAN_IGNORE : 0, 371 + btree_root_bkey_invalid, 370 372 "invalid btree root %s", 371 373 bch2_btree_id_str(i)); 372 374 if (i == BTREE_ID_alloc) ··· 378 376 ret = bch2_btree_root_read(c, i, &r->key, r->level); 379 377 if (ret) { 380 378 fsck_err(c, 379 + btree_root_read_error, 381 380 "error reading btree root %s", 382 381 bch2_btree_id_str(i)); 383 382 if (btree_id_is_alloc(i)) ··· 717 714 if (mustfix_fsck_err_on(c->sb.clean && 718 715 last_journal_entry && 719 716 !journal_entry_empty(last_journal_entry), c, 717 + clean_but_journal_not_empty, 720 718 "filesystem marked clean but journal not empty")) { 721 719 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info); 722 720 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); ··· 725 721 } 726 722 727 723 if (!last_journal_entry) { 728 - fsck_err_on(!c->sb.clean, c, "no journal entries found"); 724 + fsck_err_on(!c->sb.clean, c, 725 + dirty_but_no_journal_entries, 726 + "no journal entries found"); 729 727 if (clean) 730 728 goto use_clean; 731 729
+3 -3
fs/bcachefs/reflink.c
··· 28 28 29 29 /* reflink pointers */ 30 30 31 - int bch2_reflink_p_invalid(const struct bch_fs *c, struct bkey_s_c k, 31 + int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k, 32 32 enum bkey_invalid_flags flags, 33 33 struct printbuf *err) 34 34 { ··· 75 75 76 76 /* indirect extents */ 77 77 78 - int bch2_reflink_v_invalid(const struct bch_fs *c, struct bkey_s_c k, 78 + int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k, 79 79 enum bkey_invalid_flags flags, 80 80 struct printbuf *err) 81 81 { ··· 126 126 127 127 /* indirect inline data */ 128 128 129 - int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k, 129 + int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k, 130 130 enum bkey_invalid_flags flags, 131 131 struct printbuf *err) 132 132 {
+3 -3
fs/bcachefs/reflink.h
··· 4 4 5 5 enum bkey_invalid_flags; 6 6 7 - int bch2_reflink_p_invalid(const struct bch_fs *, struct bkey_s_c, 7 + int bch2_reflink_p_invalid(struct bch_fs *, struct bkey_s_c, 8 8 enum bkey_invalid_flags, struct printbuf *); 9 9 void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, 10 10 struct bkey_s_c); ··· 19 19 .min_val_size = 16, \ 20 20 }) 21 21 22 - int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c, 22 + int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c, 23 23 enum bkey_invalid_flags, struct printbuf *); 24 24 void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *, 25 25 struct bkey_s_c); ··· 35 35 .min_val_size = 8, \ 36 36 }) 37 37 38 - int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c, 38 + int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c, 39 39 enum bkey_invalid_flags, struct printbuf *); 40 40 void bch2_indirect_inline_data_to_text(struct printbuf *, 41 41 struct bch_fs *, struct bkey_s_c);
+3
fs/bcachefs/sb-clean.c
··· 82 82 int ret = 0; 83 83 84 84 if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c, 85 + sb_clean_journal_seq_mismatch, 85 86 "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown", 86 87 le64_to_cpu(clean->journal_seq), 87 88 le64_to_cpu(j->seq))) { ··· 120 119 k1->k.u64s != k2->k.u64s || 121 120 memcmp(k1, k2, bkey_bytes(&k1->k)) || 122 121 l1 != l2, c, 122 + sb_clean_btree_root_mismatch, 123 123 "superblock btree root %u doesn't match journal after clean shutdown\n" 124 124 "sb: l=%u %s\n" 125 125 "journal: l=%u %s\n", i, ··· 142 140 sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean); 143 141 144 142 if (fsck_err_on(!sb_clean, c, 143 + sb_clean_missing, 145 144 "superblock marked clean but clean section not present")) { 146 145 SET_BCH_SB_CLEAN(c->disk_sb.sb, false); 147 146 c->sb.clean = false;
+245 -1
fs/bcachefs/sb-errors.h
··· 4 4 5 5 #include "sb-errors_types.h" 6 6 7 - #define BCH_SB_ERRS() 7 + #define BCH_SB_ERRS() \ 8 + x(clean_but_journal_not_empty, 0) \ 9 + x(dirty_but_no_journal_entries, 1) \ 10 + x(dirty_but_no_journal_entries_post_drop_nonflushes, 2) \ 11 + x(sb_clean_journal_seq_mismatch, 3) \ 12 + x(sb_clean_btree_root_mismatch, 4) \ 13 + x(sb_clean_missing, 5) \ 14 + x(jset_unsupported_version, 6) \ 15 + x(jset_unknown_csum, 7) \ 16 + x(jset_last_seq_newer_than_seq, 8) \ 17 + x(jset_past_bucket_end, 9) \ 18 + x(jset_seq_blacklisted, 10) \ 19 + x(journal_entries_missing, 11) \ 20 + x(journal_entry_replicas_not_marked, 12) \ 21 + x(journal_entry_past_jset_end, 13) \ 22 + x(journal_entry_replicas_data_mismatch, 14) \ 23 + x(journal_entry_bkey_u64s_0, 15) \ 24 + x(journal_entry_bkey_past_end, 16) \ 25 + x(journal_entry_bkey_bad_format, 17) \ 26 + x(journal_entry_bkey_invalid, 18) \ 27 + x(journal_entry_btree_root_bad_size, 19) \ 28 + x(journal_entry_blacklist_bad_size, 20) \ 29 + x(journal_entry_blacklist_v2_bad_size, 21) \ 30 + x(journal_entry_blacklist_v2_start_past_end, 22) \ 31 + x(journal_entry_usage_bad_size, 23) \ 32 + x(journal_entry_data_usage_bad_size, 24) \ 33 + x(journal_entry_clock_bad_size, 25) \ 34 + x(journal_entry_clock_bad_rw, 26) \ 35 + x(journal_entry_dev_usage_bad_size, 27) \ 36 + x(journal_entry_dev_usage_bad_dev, 28) \ 37 + x(journal_entry_dev_usage_bad_pad, 29) \ 38 + x(btree_node_unreadable, 30) \ 39 + x(btree_node_fault_injected, 31) \ 40 + x(btree_node_bad_magic, 32) \ 41 + x(btree_node_bad_seq, 33) \ 42 + x(btree_node_unsupported_version, 34) \ 43 + x(btree_node_bset_older_than_sb_min, 35) \ 44 + x(btree_node_bset_newer_than_sb, 36) \ 45 + x(btree_node_data_missing, 37) \ 46 + x(btree_node_bset_after_end, 38) \ 47 + x(btree_node_replicas_sectors_written_mismatch, 39) \ 48 + x(btree_node_replicas_data_mismatch, 40) \ 49 + x(bset_unknown_csum, 41) \ 50 + x(bset_bad_csum, 42) \ 51 + x(bset_past_end_of_btree_node, 43) \ 52 + x(bset_wrong_sector_offset, 
44) \ 53 + x(bset_empty, 45) \ 54 + x(bset_bad_seq, 46) \ 55 + x(bset_blacklisted_journal_seq, 47) \ 56 + x(first_bset_blacklisted_journal_seq, 48) \ 57 + x(btree_node_bad_btree, 49) \ 58 + x(btree_node_bad_level, 50) \ 59 + x(btree_node_bad_min_key, 51) \ 60 + x(btree_node_bad_max_key, 52) \ 61 + x(btree_node_bad_format, 53) \ 62 + x(btree_node_bkey_past_bset_end, 54) \ 63 + x(btree_node_bkey_bad_format, 55) \ 64 + x(btree_node_bad_bkey, 56) \ 65 + x(btree_node_bkey_out_of_order, 57) \ 66 + x(btree_root_bkey_invalid, 58) \ 67 + x(btree_root_read_error, 59) \ 68 + x(btree_root_bad_min_key, 60) \ 69 + x(btree_root_bad_max_key, 61) \ 70 + x(btree_node_read_error, 62) \ 71 + x(btree_node_topology_bad_min_key, 63) \ 72 + x(btree_node_topology_bad_max_key, 64) \ 73 + x(btree_node_topology_overwritten_by_prev_node, 65) \ 74 + x(btree_node_topology_overwritten_by_next_node, 66) \ 75 + x(btree_node_topology_interior_node_empty, 67) \ 76 + x(fs_usage_hidden_wrong, 68) \ 77 + x(fs_usage_btree_wrong, 69) \ 78 + x(fs_usage_data_wrong, 70) \ 79 + x(fs_usage_cached_wrong, 71) \ 80 + x(fs_usage_reserved_wrong, 72) \ 81 + x(fs_usage_persistent_reserved_wrong, 73) \ 82 + x(fs_usage_nr_inodes_wrong, 74) \ 83 + x(fs_usage_replicas_wrong, 75) \ 84 + x(dev_usage_buckets_wrong, 76) \ 85 + x(dev_usage_sectors_wrong, 77) \ 86 + x(dev_usage_fragmented_wrong, 78) \ 87 + x(dev_usage_buckets_ec_wrong, 79) \ 88 + x(bkey_version_in_future, 80) \ 89 + x(bkey_u64s_too_small, 81) \ 90 + x(bkey_invalid_type_for_btree, 82) \ 91 + x(bkey_extent_size_zero, 83) \ 92 + x(bkey_extent_size_greater_than_offset, 84) \ 93 + x(bkey_size_nonzero, 85) \ 94 + x(bkey_snapshot_nonzero, 86) \ 95 + x(bkey_snapshot_zero, 87) \ 96 + x(bkey_at_pos_max, 88) \ 97 + x(bkey_before_start_of_btree_node, 89) \ 98 + x(bkey_after_end_of_btree_node, 90) \ 99 + x(bkey_val_size_nonzero, 91) \ 100 + x(bkey_val_size_too_small, 92) \ 101 + x(alloc_v1_val_size_bad, 93) \ 102 + x(alloc_v2_unpack_error, 94) \ 103 +
x(alloc_v3_unpack_error, 95) \ 104 + x(alloc_v4_val_size_bad, 96) \ 105 + x(alloc_v4_backpointers_start_bad, 97) \ 106 + x(alloc_key_data_type_bad, 98) \ 107 + x(alloc_key_empty_but_have_data, 99) \ 108 + x(alloc_key_dirty_sectors_0, 100) \ 109 + x(alloc_key_data_type_inconsistency, 101) \ 110 + x(alloc_key_to_missing_dev_bucket, 102) \ 111 + x(alloc_key_cached_inconsistency, 103) \ 112 + x(alloc_key_cached_but_read_time_zero, 104) \ 113 + x(alloc_key_to_missing_lru_entry, 105) \ 114 + x(alloc_key_data_type_wrong, 106) \ 115 + x(alloc_key_gen_wrong, 107) \ 116 + x(alloc_key_dirty_sectors_wrong, 108) \ 117 + x(alloc_key_cached_sectors_wrong, 109) \ 118 + x(alloc_key_stripe_wrong, 110) \ 119 + x(alloc_key_stripe_redundancy_wrong, 111) \ 120 + x(bucket_sector_count_overflow, 112) \ 121 + x(bucket_metadata_type_mismatch, 113) \ 122 + x(need_discard_key_wrong, 114) \ 123 + x(freespace_key_wrong, 115) \ 124 + x(freespace_hole_missing, 116) \ 125 + x(bucket_gens_val_size_bad, 117) \ 126 + x(bucket_gens_key_wrong, 118) \ 127 + x(bucket_gens_hole_wrong, 119) \ 128 + x(bucket_gens_to_invalid_dev, 120) \ 129 + x(bucket_gens_to_invalid_buckets, 121) \ 130 + x(bucket_gens_nonzero_for_invalid_buckets, 122) \ 131 + x(need_discard_freespace_key_to_invalid_dev_bucket, 123) \ 132 + x(need_discard_freespace_key_bad, 124) \ 133 + x(backpointer_pos_wrong, 125) \ 134 + x(backpointer_to_missing_device, 126) \ 135 + x(backpointer_to_missing_alloc, 127) \ 136 + x(backpointer_to_missing_ptr, 128) \ 137 + x(lru_entry_at_time_0, 129) \ 138 + x(lru_entry_to_invalid_bucket, 130) \ 139 + x(lru_entry_bad, 131) \ 140 + x(btree_ptr_val_too_big, 132) \ 141 + x(btree_ptr_v2_val_too_big, 133) \ 142 + x(btree_ptr_has_non_ptr, 134) \ 143 + x(extent_ptrs_invalid_entry, 135) \ 144 + x(extent_ptrs_no_ptrs, 136) \ 145 + x(extent_ptrs_too_many_ptrs, 137) \ 146 + x(extent_ptrs_redundant_crc, 138) \ 147 + x(extent_ptrs_redundant_stripe, 139) \ 148 + x(extent_ptrs_unwritten, 140) \ 149 + 
x(extent_ptrs_written_and_unwritten, 141) \ 150 + x(ptr_to_invalid_device, 142) \ 151 + x(ptr_to_duplicate_device, 143) \ 152 + x(ptr_after_last_bucket, 144) \ 153 + x(ptr_before_first_bucket, 145) \ 154 + x(ptr_spans_multiple_buckets, 146) \ 155 + x(ptr_to_missing_backpointer, 147) \ 156 + x(ptr_to_missing_alloc_key, 148) \ 157 + x(ptr_to_missing_replicas_entry, 149) \ 158 + x(ptr_to_missing_stripe, 150) \ 159 + x(ptr_to_incorrect_stripe, 151) \ 160 + x(ptr_gen_newer_than_bucket_gen, 152) \ 161 + x(ptr_too_stale, 153) \ 162 + x(stale_dirty_ptr, 154) \ 163 + x(ptr_bucket_data_type_mismatch, 155) \ 164 + x(ptr_cached_and_erasure_coded, 156) \ 165 + x(ptr_crc_uncompressed_size_too_small, 157) \ 166 + x(ptr_crc_csum_type_unknown, 158) \ 167 + x(ptr_crc_compression_type_unknown, 159) \ 168 + x(ptr_crc_redundant, 160) \ 169 + x(ptr_crc_uncompressed_size_too_big, 161) \ 170 + x(ptr_crc_nonce_mismatch, 162) \ 171 + x(ptr_stripe_redundant, 163) \ 172 + x(reservation_key_nr_replicas_invalid, 164) \ 173 + x(reflink_v_refcount_wrong, 165) \ 174 + x(reflink_p_to_missing_reflink_v, 166) \ 175 + x(stripe_pos_bad, 167) \ 176 + x(stripe_val_size_bad, 168) \ 177 + x(stripe_sector_count_wrong, 169) \ 178 + x(snapshot_tree_pos_bad, 170) \ 179 + x(snapshot_tree_to_missing_snapshot, 171) \ 180 + x(snapshot_tree_to_missing_subvol, 172) \ 181 + x(snapshot_tree_to_wrong_subvol, 173) \ 182 + x(snapshot_tree_to_snapshot_subvol, 174) \ 183 + x(snapshot_pos_bad, 175) \ 184 + x(snapshot_parent_bad, 176) \ 185 + x(snapshot_children_not_normalized, 177) \ 186 + x(snapshot_child_duplicate, 178) \ 187 + x(snapshot_child_bad, 179) \ 188 + x(snapshot_skiplist_not_normalized, 180) \ 189 + x(snapshot_skiplist_bad, 181) \ 190 + x(snapshot_should_not_have_subvol, 182) \ 191 + x(snapshot_to_bad_snapshot_tree, 183) \ 192 + x(snapshot_bad_depth, 184) \ 193 + x(snapshot_bad_skiplist, 185) \ 194 + x(subvol_pos_bad, 186) \ 195 + x(subvol_not_master_and_not_snapshot, 187) \ 196 + x(subvol_to_missing_root, 188) 
\ 197 + x(subvol_root_wrong_bi_subvol, 189) \ 198 + x(bkey_in_missing_snapshot, 190) \ 199 + x(inode_pos_inode_nonzero, 191) \ 200 + x(inode_pos_blockdev_range, 192) \ 201 + x(inode_unpack_error, 193) \ 202 + x(inode_str_hash_invalid, 194) \ 203 + x(inode_v3_fields_start_bad, 195) \ 204 + x(inode_snapshot_mismatch, 196) \ 205 + x(inode_unlinked_but_clean, 197) \ 206 + x(inode_unlinked_but_nlink_nonzero, 198) \ 207 + x(inode_checksum_type_invalid, 199) \ 208 + x(inode_compression_type_invalid, 200) \ 209 + x(inode_subvol_root_but_not_dir, 201) \ 210 + x(inode_i_size_dirty_but_clean, 202) \ 211 + x(inode_i_sectors_dirty_but_clean, 203) \ 212 + x(inode_i_sectors_wrong, 204) \ 213 + x(inode_dir_wrong_nlink, 205) \ 214 + x(inode_dir_multiple_links, 206) \ 215 + x(inode_multiple_links_but_nlink_0, 207) \ 216 + x(inode_wrong_backpointer, 208) \ 217 + x(inode_wrong_nlink, 209) \ 218 + x(inode_unreachable, 210) \ 219 + x(deleted_inode_but_clean, 211) \ 220 + x(deleted_inode_missing, 212) \ 221 + x(deleted_inode_is_dir, 213) \ 222 + x(deleted_inode_not_unlinked, 214) \ 223 + x(extent_overlapping, 215) \ 224 + x(extent_in_missing_inode, 216) \ 225 + x(extent_in_non_reg_inode, 217) \ 226 + x(extent_past_end_of_inode, 218) \ 227 + x(dirent_empty_name, 219) \ 228 + x(dirent_val_too_big, 220) \ 229 + x(dirent_name_too_long, 221) \ 230 + x(dirent_name_embedded_nul, 222) \ 231 + x(dirent_name_dot_or_dotdot, 223) \ 232 + x(dirent_name_has_slash, 224) \ 233 + x(dirent_d_type_wrong, 225) \ 234 + x(dirent_d_parent_subvol_wrong, 226) \ 235 + x(dirent_in_missing_dir_inode, 227) \ 236 + x(dirent_in_non_dir_inode, 228) \ 237 + x(dirent_to_missing_inode, 229) \ 238 + x(dirent_to_missing_subvol, 230) \ 239 + x(dirent_to_itself, 231) \ 240 + x(quota_type_invalid, 232) \ 241 + x(xattr_val_size_too_small, 233) \ 242 + x(xattr_val_size_too_big, 234) \ 243 + x(xattr_invalid_type, 235) \ 244 + x(xattr_name_invalid_chars, 236) \ 245 + x(xattr_in_missing_inode, 237) \ 246 + x(root_subvol_missing, 
238) \ 247 + x(root_dir_missing, 239) \ 248 + x(root_inode_not_dir, 240) \ 249 + x(dir_loop, 241) \ 250 + x(hash_table_key_duplicate, 242) \ 251 + x(hash_table_key_wrong_offset, 243) 8 252 9 253 enum bch_sb_error_id { 10 254 #define x(t, n) BCH_FSCK_ERR_##t = n,
+53 -51
fs/bcachefs/snapshot.c
··· 30 30 le32_to_cpu(t.v->root_snapshot)); 31 31 } 32 32 33 - int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k, 33 + int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k, 34 34 enum bkey_invalid_flags flags, 35 35 struct printbuf *err) 36 36 { 37 - if (bkey_gt(k.k->p, POS(0, U32_MAX)) || 38 - bkey_lt(k.k->p, POS(0, 1))) { 39 - prt_printf(err, "bad pos"); 40 - return -BCH_ERR_invalid_bkey; 41 - } 37 + int ret = 0; 42 38 43 - return 0; 39 + bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) || 40 + bkey_lt(k.k->p, POS(0, 1)), c, err, 41 + snapshot_tree_pos_bad, 42 + "bad pos"); 43 + fsck_err: 44 + return ret; 44 45 } 45 46 46 47 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id, ··· 203 202 le32_to_cpu(s.v->skip[2])); 204 203 } 205 204 206 - int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k, 205 + int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k, 207 206 enum bkey_invalid_flags flags, 208 207 struct printbuf *err) 209 208 { 210 209 struct bkey_s_c_snapshot s; 211 210 u32 i, id; 211 + int ret = 0; 212 212 213 - if (bkey_gt(k.k->p, POS(0, U32_MAX)) || 214 - bkey_lt(k.k->p, POS(0, 1))) { 215 - prt_printf(err, "bad pos"); 216 - return -BCH_ERR_invalid_bkey; 217 - } 213 + bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) || 214 + bkey_lt(k.k->p, POS(0, 1)), c, err, 215 + snapshot_pos_bad, 216 + "bad pos"); 218 217 219 218 s = bkey_s_c_to_snapshot(k); 220 219 221 220 id = le32_to_cpu(s.v->parent); 222 - if (id && id <= k.k->p.offset) { 223 - prt_printf(err, "bad parent node (%u <= %llu)", 224 - id, k.k->p.offset); 225 - return -BCH_ERR_invalid_bkey; 226 - } 221 + bkey_fsck_err_on(id && id <= k.k->p.offset, c, err, 222 + snapshot_parent_bad, 223 + "bad parent node (%u <= %llu)", 224 + id, k.k->p.offset); 227 225 228 - if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) { 229 - prt_printf(err, "children not normalized"); 230 - return -BCH_ERR_invalid_bkey; 231 - } 226 + 
bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err, 227 + snapshot_children_not_normalized, 228 + "children not normalized"); 232 229 233 - if (s.v->children[0] && 234 - s.v->children[0] == s.v->children[1]) { 235 - prt_printf(err, "duplicate child nodes"); 236 - return -BCH_ERR_invalid_bkey; 237 - } 230 + bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err, 231 + snapshot_child_duplicate, 232 + "duplicate child nodes"); 238 233 239 234 for (i = 0; i < 2; i++) { 240 235 id = le32_to_cpu(s.v->children[i]); 241 236 242 - if (id >= k.k->p.offset) { 243 - prt_printf(err, "bad child node (%u >= %llu)", 244 - id, k.k->p.offset); 245 - return -BCH_ERR_invalid_bkey; 246 - } 237 + bkey_fsck_err_on(id >= k.k->p.offset, c, err, 238 + snapshot_child_bad, 239 + "bad child node (%u >= %llu)", 240 + id, k.k->p.offset); 247 241 } 248 242 249 243 if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) { 250 - if (le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) || 251 - le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2])) { 252 - prt_printf(err, "skiplist not normalized"); 253 - return -BCH_ERR_invalid_bkey; 254 - } 244 + bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) || 245 + le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err, 246 + snapshot_skiplist_not_normalized, 247 + "skiplist not normalized"); 255 248 256 249 for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) { 257 250 id = le32_to_cpu(s.v->skip[i]); 258 251 259 - if (id && id < le32_to_cpu(s.v->parent)) { 260 - prt_printf(err, "bad skiplist node %u", id); 261 - return -BCH_ERR_invalid_bkey; 262 - } 252 + bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err, 253 + snapshot_skiplist_bad, 254 + "bad skiplist node %u", id); 263 255 } 264 256 } 265 - 266 - return 0; 257 + fsck_err: 258 + return ret; 267 259 } 268 260 269 261 static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id) ··· 523 529 if 
(fsck_err_on(ret || 524 530 root_id != bch2_snapshot_root(c, root_id) || 525 531 st.k->p.offset != le32_to_cpu(s.tree), 526 - c, 532 + c, snapshot_tree_to_missing_snapshot, 527 533 "snapshot tree points to missing/incorrect snapshot:\n %s", 528 534 (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) { 529 535 ret = bch2_btree_delete_at(trans, iter, 0); ··· 535 541 if (ret && !bch2_err_matches(ret, ENOENT)) 536 542 goto err; 537 543 538 - if (fsck_err_on(ret, c, 544 + if (fsck_err_on(ret, 545 + c, snapshot_tree_to_missing_subvol, 539 546 "snapshot tree points to missing subvolume:\n %s", 540 547 (printbuf_reset(&buf), 541 548 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) || 542 549 fsck_err_on(!bch2_snapshot_is_ancestor_early(c, 543 550 le32_to_cpu(subvol.snapshot), 544 - root_id), c, 551 + root_id), 552 + c, snapshot_tree_to_wrong_subvol, 545 553 "snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s", 546 554 (printbuf_reset(&buf), 547 555 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) || 548 - fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c, 556 + fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), 557 + c, snapshot_tree_to_snapshot_subvol, 549 558 "snapshot tree points to snapshot subvolume:\n %s", 550 559 (printbuf_reset(&buf), 551 560 bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) { ··· 784 787 goto err; 785 788 } 786 789 } else { 787 - if (fsck_err_on(s.subvol, c, "snapshot should not point to subvol:\n %s", 790 + if (fsck_err_on(s.subvol, 791 + c, snapshot_should_not_have_subvol, 792 + "snapshot should not point to subvol:\n %s", 788 793 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 789 794 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot); 790 795 ret = PTR_ERR_OR_ZERO(u); ··· 802 803 if (ret < 0) 803 804 goto err; 804 805 805 - if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n %s", 806 + if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree, 807 + "snapshot points to missing/incorrect tree:\n %s", 
806 808 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { 807 809 ret = snapshot_tree_ptr_repair(trans, iter, k, &s); 808 810 if (ret) ··· 815 815 816 816 if (le32_to_cpu(s.depth) != real_depth && 817 817 (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists || 818 - fsck_err(c, "snapshot with incorrect depth field, should be %u:\n %s", 818 + fsck_err(c, snapshot_bad_depth, 819 + "snapshot with incorrect depth field, should be %u:\n %s", 819 820 real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) { 820 821 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot); 821 822 ret = PTR_ERR_OR_ZERO(u); ··· 833 832 834 833 if (!ret && 835 834 (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists || 836 - fsck_err(c, "snapshot with bad skiplist field:\n %s", 835 + fsck_err(c, snapshot_bad_skiplist, 836 + "snapshot with bad skiplist field:\n %s", 837 837 (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) { 838 838 u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot); 839 839 ret = PTR_ERR_OR_ZERO(u);
+2 -2
fs/bcachefs/snapshot.h
··· 5 5 enum bkey_invalid_flags; 6 6 7 7 void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 8 - int bch2_snapshot_tree_invalid(const struct bch_fs *, struct bkey_s_c, 8 + int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c, 9 9 enum bkey_invalid_flags, struct printbuf *); 10 10 11 11 #define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \ ··· 19 19 int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *); 20 20 21 21 void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 22 - int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c, 22 + int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c, 23 23 enum bkey_invalid_flags, struct printbuf *); 24 24 int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned, 25 25 struct bkey_s_c, struct bkey_s_c, unsigned);
+10 -8
fs/bcachefs/subvolume.c
··· 62 62 if (ret) 63 63 return ret; 64 64 65 - if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c, 65 + if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, 66 + c, subvol_not_master_and_not_snapshot, 66 67 "subvolume %llu is not set as snapshot but is not master subvolume", 67 68 k.k->p.offset)) { 68 69 struct bkey_i_subvolume *s = ··· 98 97 99 98 /* Subvolumes: */ 100 99 101 - int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k, 100 + int bch2_subvolume_invalid(struct bch_fs *c, struct bkey_s_c k, 102 101 enum bkey_invalid_flags flags, struct printbuf *err) 103 102 { 104 - if (bkey_lt(k.k->p, SUBVOL_POS_MIN) || 105 - bkey_gt(k.k->p, SUBVOL_POS_MAX)) { 106 - prt_printf(err, "invalid pos"); 107 - return -BCH_ERR_invalid_bkey; 108 - } 103 + int ret = 0; 109 104 110 - return 0; 105 + bkey_fsck_err_on(bkey_lt(k.k->p, SUBVOL_POS_MIN) || 106 + bkey_gt(k.k->p, SUBVOL_POS_MAX), c, err, 107 + subvol_pos_bad, 108 + "invalid pos"); 109 + fsck_err: 110 + return ret; 111 111 } 112 112 113 113 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
+1 -1
fs/bcachefs/subvolume.h
··· 9 9 10 10 int bch2_check_subvols(struct bch_fs *); 11 11 12 - int bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c, 12 + int bch2_subvolume_invalid(struct bch_fs *, struct bkey_s_c, 13 13 enum bkey_invalid_flags, struct printbuf *); 14 14 void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 15 15
+22 -30
fs/bcachefs/xattr.c
··· 70 70 .cmp_bkey = xattr_cmp_bkey, 71 71 }; 72 72 73 - int bch2_xattr_invalid(const struct bch_fs *c, struct bkey_s_c k, 73 + int bch2_xattr_invalid(struct bch_fs *c, struct bkey_s_c k, 74 74 enum bkey_invalid_flags flags, 75 75 struct printbuf *err) 76 76 { 77 - const struct xattr_handler *handler; 78 77 struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k); 78 + unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len, 79 + le16_to_cpu(xattr.v->x_val_len)); 80 + int ret = 0; 79 81 80 - if (bkey_val_u64s(k.k) < 81 - xattr_val_u64s(xattr.v->x_name_len, 82 - le16_to_cpu(xattr.v->x_val_len))) { 83 - prt_printf(err, "value too small (%zu < %u)", 84 - bkey_val_u64s(k.k), 85 - xattr_val_u64s(xattr.v->x_name_len, 86 - le16_to_cpu(xattr.v->x_val_len))); 87 - return -BCH_ERR_invalid_bkey; 88 - } 82 + bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s, c, err, 83 + xattr_val_size_too_small, 84 + "value too small (%zu < %u)", 85 + bkey_val_u64s(k.k), val_u64s); 89 86 90 87 /* XXX why +4 ? */ 91 - if (bkey_val_u64s(k.k) > 92 - xattr_val_u64s(xattr.v->x_name_len, 93 - le16_to_cpu(xattr.v->x_val_len) + 4)) { 94 - prt_printf(err, "value too big (%zu > %u)", 95 - bkey_val_u64s(k.k), 96 - xattr_val_u64s(xattr.v->x_name_len, 97 - le16_to_cpu(xattr.v->x_val_len) + 4)); 98 - return -BCH_ERR_invalid_bkey; 99 - } 88 + val_u64s = xattr_val_u64s(xattr.v->x_name_len, 89 + le16_to_cpu(xattr.v->x_val_len) + 4); 100 90 101 - handler = bch2_xattr_type_to_handler(xattr.v->x_type); 102 - if (!handler) { 103 - prt_printf(err, "invalid type (%u)", xattr.v->x_type); 104 - return -BCH_ERR_invalid_bkey; 105 - } 91 + bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s, c, err, 92 + xattr_val_size_too_big, 93 + "value too big (%zu > %u)", 94 + bkey_val_u64s(k.k), val_u64s); 106 95 107 - if (memchr(xattr.v->x_name, '\0', xattr.v->x_name_len)) { 108 - prt_printf(err, "xattr name has invalid characters"); 109 - return -BCH_ERR_invalid_bkey; 110 - } 96 + bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type), 
c, err, 97 + xattr_invalid_type, 98 + "invalid type (%u)", xattr.v->x_type); 111 99 112 - return 0; 100 + bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len), c, err, 101 + xattr_name_invalid_chars, 102 + "xattr name has invalid characters"); 103 + fsck_err: 104 + return ret; 113 105 } 114 106 115 107 void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
+1 -1
fs/bcachefs/xattr.h
··· 6 6 7 7 extern const struct bch_hash_desc bch2_xattr_hash_desc; 8 8 9 - int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c, 9 + int bch2_xattr_invalid(struct bch_fs *, struct bkey_s_c, 10 10 enum bkey_invalid_flags, struct printbuf *); 11 11 void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c); 12 12